diff --git a/NOTICE b/NOTICE index 157dc7e4d..02e5d2ea2 100644 --- a/NOTICE +++ b/NOTICE @@ -20,8 +20,8 @@ SECTION 1: BSD-STYLE, MIT-STYLE, OR SIMILAR STYLE LICENSES SECTION 2: Apache License, V2.0 - >>> fastutil-6.6.1 - >>> hadoop-common-2.4.0 + >>> koloboke-1.0.0 + >>> hadoop-common-2.7.3 >>> swagger-ui-2.0.17 @@ -191,24 +191,22 @@ Copyright (C) 2006-2009 Dustin Sallings Apache License, V2.0 is applicable to the following component(s). ->>> fastutil-6.6.1 +>>> koloboke-1.0.0 -* Copyright (C) 2002-2014 Sebastiano Vigna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ->>> hadoop-common-2.4.0 +>>> hadoop-common-2.7.3 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index 8d5f4c981..24a629e8a 100644 --- a/README.md +++ b/README.md @@ -21,9 +21,8 @@ Some details on the directories in GemFire layout are mentioned below: gemfire-shared/src/main/java ===> Code shared between GemFire and GemFireXD client. - gemfire-joptsimple/src/main/java ===> A copy of JOpt Simple adapted for GemFire. - - gemfire-json/src/main/java ===> A copy of org.json adapted for GemFire. + gemfire-util/src/main/java ===> Utility classes including copy of JOpt Simple and org.json + adapted for GemFire. gemfire-core/src/main/java ===> Core GemFire code. 
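The bundled JOpt Simple sources keep their original `joptsimple` package even after moving under `gemfire-util` (see the file renames at the end of this patch). As a rough illustration only — not part of the patch, and assuming the adapted copy preserves JOpt Simple's usual `OptionParser`/`OptionSet` fluent API — option parsing with the bundled classes might look like this; the option names and defaults below are made up:

```java
import joptsimple.OptionParser;
import joptsimple.OptionSet;

public class OptionParsingSketch {
  public static void main(String[] args) {
    OptionParser parser = new OptionParser();
    // hypothetical options, shown only to illustrate the bundled fluent API
    parser.accepts("port").withRequiredArg().ofType(Integer.class).defaultsTo(1527);
    parser.accepts("verbose");

    OptionSet options = parser.parse("--port", "1528", "--verbose");
    int port = (Integer)options.valueOf("port");
    System.out.println("port=" + port + ", verbose=" + options.has("verbose"));
  }
}
```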
diff --git a/build.gradle b/build.gradle index 79210bad9..2276671f6 100644 --- a/build.gradle +++ b/build.gradle @@ -38,11 +38,10 @@ allprojects { ext { scalaBinaryVersion = '2.11' - scalaVersion = scalaBinaryVersion + '.8' sparkVersion = '2.1.1' springVersion = '3.2.17.RELEASE' log4jVersion = '1.2.17' - slf4jVersion = '1.7.21' + slf4jVersion = '1.7.25' junitVersion = '4.12' antVersion = '1.9.7' pxfVersion = '2.5.1.0' @@ -50,21 +49,22 @@ allprojects { jettyVersion = '9.2.22.v20170606' hadoopVersion = '2.7.3' protobufVersion = '2.6.1' - kryoVersion = '4.0.0' + kryoVersion = '4.0.1' thriftVersion = '0.9.3' - sunJerseyVersion = '1.19.1' - hadoopJettyVersion = '6.1.26' - jsr305Version = '3.0.1' + jerseyVersion = '2.26' + jsr305Version = '3.0.2' servletAPIVersion = '3.1.0' derbyVersion = '10.12.1.1' - hbaseVersion = '0.98.17-hadoop2' + // hbaseVersion = '0.98.17-hadoop2' + hbaseVersion = '0.98.24-hadoop2' + hadoopJettyVersion = '6.1.26' + sunJerseyVersion = '1.19.4' guavaVersion = '14.0.1' - nettyAllVersion = '4.0.42.Final' - jlineVersion = '2.14.2' - fastutilVersion = '8.1.0' - //hbaseVersion = '0.94.4-gemfire-r45047' - //hadoopVersion = '2.2.0-gphd-3.1.0.0' - //hadoopVersion = '2.4.1-gphd-3.2.0.0-54' + nettyAllVersion = '4.0.51.Final' + jlineVersion = '2.14.5' + jackson1Version = '1.9.13' + kolobokeVersion = '1.0.0' + kolobokeCompileVersion = '0.5.1' // product and release properties PRODUCT_NAME = 'SnappyData RowStore' @@ -232,132 +232,36 @@ def getStackTrace(def t) { return sw.toString() } -task cleanTestOutput << { +def now() { + return new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') +} + +task cleanTestOutput { doLast { def testDir = "${testResultsBase}/junit" delete testDir file(testDir).mkdirs() -} -task cleanDUnitOutput << { +} } +task cleanDUnitOutput { doLast { def testDir = "${testResultsBase}/dunit" delete testDir file(testDir).mkdirs() // writeTestProperties(testDir, '.') -} -task cleanWanOutput << { +} } +task cleanWanOutput { doLast { def testDir = "${testResultsBase}/wan" delete testDir file(testDir).mkdirs() -} -task cleanIntegrationOutput << { +} } +task cleanIntegrationOutput { doLast { def testDir = "${testResultsBase}/integration" delete testDir file(testDir).mkdirs() -} -task cleanReports << { +} } +task cleanReports { doLast { def reportsDir = "${testResultsBase}/combined-reports" delete reportsDir file(reportsDir).mkdirs() -} - - -def parallelTests(def proj, def testClasses, def testObj, def prefix, def splitSize, def parallelism) { - proj.task("${prefix}Parallel") { - dependsOn proj.testClasses, "${subprojectBase}storeProduct" - def ntestClasses = testClasses.size() - def testClassesGroups = testClasses.collate(splitSize) - def ntestClassesGroups = testClassesGroups.size() - def lastTestClass = new ThreadLocal() - def testCount = new java.util.concurrent.atomic.AtomicInteger(0) - def failureCount = new java.util.concurrent.atomic.AtomicInteger(0) - def testTasks = (0.. 
- proj.task("${prefix}Parallel${index}", type: Test) { - maxParallelForks = 1 - minHeapSize = testObj.minHeapSize - maxHeapSize = testObj.maxHeapSize - - def baseURI = getTestClassesDir().toURI() - include testClassesGroups[index].collect { - baseURI.relativize(it.toURI()).getPath() - } - - workingDir = testObj.workingDir - binResultsDir = file("${testObj.binResultsDir}_${index}") - reports.html.enabled = false - reports.junitXml.destination = file(workingDir) - - systemProperties testObj.systemProperties - - beforeTest { desc -> - def lastClass = lastTestClass.get() - if (desc.className != lastClass) { - def count = testCount.incrementAndGet() - def now = new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') - def threadId = Long.toHexString(Thread.currentThread().getId()) - if (lastClass != null) { - println "$now <0x$threadId> END $lastClass" - } - println "$now <0x$threadId> Start ${desc.className} ($count/$ntestClasses)" - lastTestClass.set(desc.className) - } - } - afterTest { desc, result -> - if (result.exceptions.size() > 0) { - failureCount.incrementAndGet() - } - } - } - } - doLast { - if (ntestClasses > 0) { - def count = new java.util.concurrent.atomic.AtomicInteger(0) - def threadProcess = new Runnable() { - @Override - void run() { - def index = 0 - def thread = Thread.currentThread() - while ((index = count.getAndIncrement()) < ntestClassesGroups) { - def now = new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') - def threadId = Long.toHexString(Thread.currentThread().getId()) - def test = testTasks[index] - println "$now <0x$threadId> STARTING tests: ${test.getIncludes()}" - try { - test.executeTests() - def lastClass = lastTestClass.get() - if (lastClass != null) { - println "$now <0x$threadId> END $lastClass" - } - } catch (Exception e) { - print "Exception in thread '${thread.getName()}' $e " - e.printStackTrace() - } - now = new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') - println "$now <0x$threadId> ENDED tests: ${test.getIncludes()}" - } - } - } - def threads = new Thread[parallelism] - parallelism.times { threads[it] = new Thread(threadProcess) } - threads.each { it.start() } - threads.each { it.join() } - - def progressTxt = file("${testObj.workingDir}/progress.txt").getAbsolutePath().toURI() - def report = file("${testObj.workingDir}/html/${proj.name}/index.html").getAbsolutePath().toURI() - if (failureCount.get() > 0) { - println() - def failureMsg = "FAILED: There were ${failureCount.get()} failures.\n" - failureMsg += " See the progress report in: file://$progressTxt\n" - failureMsg += " HTML report in: file://$report" - throw new GradleException(failureMsg) - } else { - println() - println("SUCCESS: See the progress report in: file://$progressTxt") - println(" HTML report in: file://$report") - println() - } - } - } - } -} +} } subprojects { @@ -378,11 +282,6 @@ subprojects { } configurations { - provided { - description 'a dependency that is provided externally at runtime' - visible true - } - testOutput { extendsFrom testCompile description 'a dependency that exposes test artifacts' @@ -405,50 +304,33 @@ subprojects { } } + sourceSets { + test.compileClasspath += configurations.compileOnly + test.runtimeClasspath += configurations.compileOnly + } + + cleanEclipse.doLast { + delete '.settings/org.eclipse.core.resources.prefs' + } + eclipse { classpath { - defaultOutputDir = file('build-eclipse') - plusConfigurations += [ configurations.provided ] + defaultOutputDir = file('build-artifacts/eclipse') + downloadSources = true } // Several files have UTF-8 encoding and 
Eclipse running on Windows // will have trouble unless we tell it to use UTF-8 encoding. // This setting needs to go into the core.resources.prefs file, // which the JDT script isn't set up to configure - eclipseJdt << { + eclipseJdt.doLast { File f = file('.settings/org.eclipse.core.resources.prefs') f.write('eclipse.preferences.version=1\n') f.append('encoding/=utf-8') } } - cleanEclipse << { - delete '.settings/org.eclipse.core.resources.prefs' - } - - idea { - module { - scopes.PROVIDED.plus += [ configurations.provided ] - } - } - eclipse { - classpath { - defaultOutputDir = file('build-artifacts/eclipse') - downloadSources = true - plusConfigurations += [ configurations.provided ] - } - } - - sourceSets { - main.compileClasspath += configurations.provided - main.runtimeClasspath -= configurations.provided - test.compileClasspath += configurations.provided - test.runtimeClasspath += configurations.provided - } - - javadoc.classpath += configurations.provided - dependencies { - provided 'com.google.code.findbugs:annotations:3.0.1' + compileOnly 'com.google.code.findbugs:annotations:3.0.1' compile "log4j:log4j:${log4jVersion}" compile "org.slf4j:slf4j-api:${slf4jVersion}" compile "org.slf4j:slf4j-log4j12:${slf4jVersion}" @@ -460,21 +342,29 @@ subprojects { testCompile 'org.jmock:jmock-legacy:2.5.1' testCompile 'edu.umd.cs.mtc:multithreadedtc:1.01' testRuntime 'cglib:cglib-nodep:2.1_3' - testRuntime 'org.objenesis:objenesis:1.0' + testRuntime 'org.objenesis:objenesis:2.6' } test { dependsOn "${subprojectBase}storeProduct" - maxParallelForks = (Runtime.getRuntime().availableProcessors() * 3) / 2 + maxParallelForks = Runtime.getRuntime().availableProcessors() minHeapSize = '1g' maxHeapSize = '1g' + includes.clear() + def single = System.getProperty('junit.single') + if (single == null || single.length() == 0) { + single = rootProject.hasProperty('junit.single') ? + rootProject.property('junit.single') : null + } if (single == null || single.length() == 0) { include '**/*Test.class' - exclude '**/*DUnitTest.class' - exclude '**/*DUnit.class' + exclude '**/*TestUtil*' + exclude '**/*TestBase.class' + exclude '**/*DUnit*.class' exclude '**/derbyTesting/**' + exclude '**/pxf/**' } else { include single } @@ -490,44 +380,6 @@ subprojects { reports.html.destination = file("${workingDir}/html/${project.name}") reports.junitXml.destination = file(workingDir) } - task junitSingle1(type: Test) { - def single = System.getProperty('junit.single') - if (single == null || single.length() == 0) { - include '**/BugsTest.class' - include '**/DDLPersistenceHDFSTest.class' - } else { - include single - } - } - task junitSingle2(type: Test) { - include '**/*Test.class' - exclude '**/BugsTest.class' - exclude '**/DDLPersistenceHDFSTest.class' - exclude '**/*DUnitTest.class' - exclude '**/*DUnit.class' - exclude '**/derbyTesting/**' - exclude '**/client/am/**' - } - task junit { - def testClasses = new ArrayList() - testClasses.addAll(junitSingle1.getCandidateClassFiles().getFiles()) - def single = System.getProperty('junit.single') - if (single == null || single.length() == 0) { - testClasses.addAll(junitSingle2.getCandidateClassFiles().getFiles()) - } - parallelTests(project, testClasses, test, 'junit', 3, test.maxParallelForks) - } - task junitReport(type: TestReport) { - description 'Combines the parallel junit test reports.' 
- destinationDir = file("${testResultsBase}/junit/html/${project.name}") - mustRunAfter junitParallel - } - gradle.taskGraph.whenReady({ graph -> - project.tasks.getByName('junitReport').reportOn project.tasks.withType(Test).matching { - it.getName().startsWith('junitParallel') - } - }) - junit.dependsOn junitParallel, junitReport task dunitTest(type:Test, overwrite: true) { dependsOn "${subprojectBase}storeProduct" @@ -535,6 +387,8 @@ subprojects { minHeapSize = '2g' maxHeapSize = '2g' + includes.clear() + def single = System.getProperty('dunit.single') if (single == null || single.length() == 0) { single = rootProject.hasProperty('dunit.single') ? @@ -542,8 +396,10 @@ subprojects { } if (single == null || single.length() == 0) { def dunitTests = fileTree(dir: testClassesDir, - includes: ['**/*DUnitTest.class', '**/*DUnit.class'], - excludes: ['**/NCJ*DUnit.class', '**/pivotal/gemfirexd/wan/**/*DUnit.class']) + includes: ['**/*DUnit.class'], + excludes: ['**/NCJ*DUnit.class', '**/BackwardCompatabilityPart*DUnit.class', + '**/*Perf*DUnit.class', '**/ListAggDUnit.class', '**/SingleHop*TransactionDUnit.class', + '**/*Parallel*AsyncEvent*DUnit.class', '**/pivotal/gemfirexd/wan/**/*DUnit.class']) FileTree includeTestFiles = dunitTests int dunitFrom = rootProject.hasProperty('dunit.from') ? getLast(includeTestFiles, rootProject.property('dunit.from')) : 0 @@ -561,6 +417,13 @@ subprojects { } else { include single } + exclude '**/*NCJ*DUnit.class' + exclude '**/BackwardCompatabilityPart*DUnit.class' + exclude '**/*Perf*DUnit.class' + exclude '**/ListAggDUnit.class' + exclude '**/SingleHop*TransactionDUnit.class' + exclude '**/*Parallel*AsyncEvent*DUnit.class' + exclude '**/pivotal/gemfirexd/wan/**/*DUnit.class' workingDir = "${testResultsBase}/dunit" @@ -568,24 +431,9 @@ subprojects { reports.html.destination = file("${workingDir}/html/${project.name}") reports.junitXml.destination = file(workingDir) - //I'm hoping this might deal with SOME OOMEs I've seen - //forkEvery 30 + // try to avoid long "tail" in parallel runs where some workers finish early + forkEvery 10 } - task dunit { - def testClasses = dunitTest.getCandidateClassFiles().getFiles() - parallelTests(project, testClasses, dunitTest, 'dunit', 4, dunitTest.maxParallelForks) - } - task dunitReport(type: TestReport) { - description 'Combines the parallel dunit test reports.' - destinationDir = file("${testResultsBase}/dunit/html/${project.name}") - mustRunAfter dunitParallel - } - gradle.taskGraph.whenReady({ graph -> - project.tasks.getByName('dunitReport').reportOn project.tasks.withType(Test).matching { - it.getName().startsWith('dunitParallel') - } - }) - dunit.dependsOn dunitParallel, dunitReport task wanTest(type:Test) { dependsOn "${subprojectBase}storeProduct" @@ -593,12 +441,19 @@ subprojects { minHeapSize = '1g' maxHeapSize = '1g' + includes.clear() + def single = System.getProperty('wan.single') + if (single == null || single.length() == 0) { + single = rootProject.hasProperty('wan.single') ? 
+ rootProject.property('wan.single') : null + } if (single == null || single.length() == 0) { include '**/pivotal/gemfirexd/wan/**/*DUnit.class' } else { include single } + exclude '**/*NCJ*DUnit.class' workingDir = "${testResultsBase}/wan" @@ -609,26 +464,13 @@ subprojects { // increase the number of JVMs for WAN tests systemProperties 'gemfire.DUnitLauncher.NUM_VMS' : '8' } - task wan { - def wanClasses = wanTest.getCandidateClassFiles().getFiles() - parallelTests(project, wanClasses, wanTest, 'wan', 1, wanTest.maxParallelForks) - } - task wanReport(type: TestReport) { - description 'Combines the parallel wan test reports.' - destinationDir = file("${testResultsBase}/wan/html/${project.name}") - mustRunAfter wanParallel - } - gradle.taskGraph.whenReady({ graph -> - project.tasks.getByName('wanReport').reportOn project.tasks.withType(Test).matching { - it.getName().startsWith('wanParallel') - } - }) - wan.dependsOn wanParallel, wanReport task integrationTest(type:Test) { dependsOn "${subprojectBase}storeProduct" maxParallelForks = Runtime.getRuntime().availableProcessors() + includes.clear() + include '**/*Test.class' exclude '**/*DUnitTest.class' exclude '**/*DUnit.class' @@ -678,32 +520,65 @@ subprojects { environment 'GEMFIREXD' : productDir.getAbsolutePath(), 'JUNIT_JAR' : sourceSets.test.output.classesDir + int numTestClasses = test.getCandidateClassFiles().getFiles().size() + def testCount = new java.util.concurrent.atomic.AtomicInteger(0) + + beforeSuite { desc -> + if (desc.className != null) { + def count = testCount.incrementAndGet() + println "${now()} Start ${desc.className} ($count/$numTestClasses)" + } + } + afterSuite { desc, result -> + if (desc.className != null) { + println "${now()} END ${desc.className}" + } + } + if (rootProject.name == 'snappy-store') { + def failureCount = new java.util.concurrent.atomic.AtomicInteger(0) + def progress = new File(workingDir, 'progress.txt') + def output = new File(workingDir, 'output.txt') + def eol = System.getProperty('line.separator') beforeTest { desc -> - def now = new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') - def progress = new File(workingDir, 'progress.txt') - def output = new File(workingDir, 'output.txt') + String now = now() progress << "${now} Starting test ${desc.className} ${desc.name}${eol}" output << "${now} STARTING TEST ${desc.className} ${desc.name}${eol}${eol}" } onOutput { desc, event -> - def output = new File(workingDir, 'output.txt') def msg = event.message if (event.destination.toString() == 'StdErr') { - msg = msg.replace('\n', '\n[stderr] ') + msg = msg.replace(eol, "${eol}[error] ") } output << msg } afterTest { desc, result -> - def now = new Date().format('yyyy-MM-dd HH:mm:ss.SSS Z') - def progress = new File(workingDir, 'progress.txt') - def output = new File(workingDir, 'output.txt') + String now = now() progress << "${now} Completed test ${desc.className} ${desc.name} with result: ${result.resultType}${eol}" output << "${eol}${now} COMPLETED TEST ${desc.className} ${desc.name} with result: ${result.resultType}${eol}${eol}" - result.exceptions.each { t -> - progress << " EXCEPTION: ${getStackTrace(t)}${eol}" - output << "${getStackTrace(t)}${eol}" + def exceptions = result.exceptions + if (exceptions.size() > 0) { + exceptions.each { t -> + progress << " EXCEPTION: ${getStackTrace(t)}${eol}" + output << "${getStackTrace(t)}${eol}" + } + failureCount.incrementAndGet() + } + } + doLast { + def report = "${test.reports.html.destination}/index.html" + if (failureCount.get() > 0) { + println() + def 
failureMsg = "FAILED: There were ${failureCount.get()} failures.${eol}" + failureMsg += " See the progress report in: file://$progress${eol}" + failureMsg += " HTML report in: file://$report" + throw new GradleException(failureMsg) + } else { + println() + println("SUCCESS: See the progress report in: file://$progress") + println(" HTML report in: file://$report") + println() } } } @@ -712,14 +587,11 @@ subprojects { }) test.dependsOn subprojectBase + 'cleanTestOutput' - junitParallel.dependsOn subprojectBase + 'cleanTestOutput' dunitTest.dependsOn subprojectBase + 'cleanDUnitOutput' - dunitParallel.dependsOn subprojectBase + 'cleanDUnitOutput' wanTest.dependsOn subprojectBase + 'cleanWanOutput' - wanParallel.dependsOn subprojectBase + 'cleanWanOutput' integrationTest.dependsOn subprojectBase + 'cleanIntegrationOutput' check.dependsOn.clear() - check.dependsOn junit, dunit, wan + check.dependsOn test, dunitTest, wanTest } // maven publish tasks @@ -801,13 +673,13 @@ int getLast(includeTestFiles, pattern) { } task junit { - dependsOn subprojects.junit + dependsOn subprojects.test } task dunit { - dependsOn "${subprojectBase}snappydata-store-tools:dunit" + dependsOn "${subprojectBase}snappydata-store-tools:dunitTest" } task wan { - dependsOn "${subprojectBase}snappydata-store-tools:wan" + dependsOn "${subprojectBase}snappydata-store-tools:wanTest" } task integrationTest { dependsOn subprojects.integrationTest @@ -824,10 +696,9 @@ gradle.taskGraph.whenReady({ graph -> tasks.getByName('combineReports').reportOn rootProject.subprojects.collect{ it.tasks.withType(Test) }.flatten() }) -junit.dependsOn subprojects.junit check.dependsOn.clear() check.dependsOn junit, dunit -if (!Boolean.getBoolean('skip.wanTest')) { +if (!rootProject.hasProperty('wan.skip')) { check.dependsOn wan } // skip combineReports for top-level builds which has its own combineReports @@ -837,6 +708,7 @@ if (!rootProject.hasProperty('store')) { task generateSources { dependsOn subprojectBase + 'gemfire-jgroups:jgMagic' + dependsOn subprojectBase + 'gemfire-util:compileJava' dependsOn subprojectBase + 'gemfire-core:createVersionPropertiesFile' dependsOn subprojectBase + 'snappydata-store-core:compileJavacc' dependsOn subprojectBase + 'snappydata-store-core:generatePropertiesFiles' @@ -864,7 +736,7 @@ install.enabled = false // pack the entire GemFireXD product tree task storeProduct(dependsOn: [ subprojectBase + 'snappydata-store-client:shadowJar', subprojectBase + 'snappydata-store-core:shadowJar', - subprojectBase + 'snappydata-store-tools:shadowJar' ]) << { + subprojectBase + 'snappydata-store-tools:shadowJar' ]) { doLast { delete productDir @@ -876,9 +748,9 @@ task storeProduct(dependsOn: [ subprojectBase + 'snappydata-store-client:shadowJ def gcmDir = System.getProperty('GCMDIR', '/gcm') - def extraJars = (gemcoreProject.configurations.provided - gemcoreProject.configurations.runtime + - coreProject.configurations.provided - coreProject.configurations.runtime + - toolsProject.configurations.provided - toolsProject.configurations.runtime).filter { + def extraJars = (gemcoreProject.configurations.compileOnly - gemcoreProject.configurations.runtime + + coreProject.configurations.compileOnly - coreProject.configurations.runtime + + toolsProject.configurations.compileOnly - toolsProject.configurations.runtime).filter { includeJar(it) } // first copy the product and dependent jars @@ -1021,7 +893,7 @@ task storeProduct(dependsOn: [ subprojectBase + 'snappydata-store-client:shadowJ } into (project(subprojectBase + 
'snappydata-store-tests').buildDir.getAbsolutePath() + '/classes/main') } -} +} } if (rootProject.hasProperty('docs')) { storeProduct.dependsOn subprojectBase + 'snappydata-store-shared:javadoc', subprojectBase + 'snappydata-store-core:javadoc' diff --git a/gemfire-core/build.gradle b/gemfire-core/build.gradle index 7028f4f7e..eadcad134 100644 --- a/gemfire-core/build.gradle +++ b/gemfire-core/build.gradle @@ -28,68 +28,80 @@ configurations { } dependencies { - provided files("${System.getProperty('java.home')}/../lib/tools.jar") - provided 'antlr:antlr:2.7.7' + compileOnly files("${System.getProperty('java.home')}/../lib/tools.jar") + compileOnly 'antlr:antlr:2.7.7' compile 'commons-io:commons-io:2.5' compile 'commons-logging:commons-logging:1.2' compile 'commons-lang:commons-lang:2.6' compile('commons-modeler:commons-modeler:2.0.1') { exclude(group: 'xml-apis', module: 'xml-apis') } - compile 'commons-cli:commons-cli:1.3.1' - compile "it.unimi.dsi:fastutil:${fastutilVersion}" - compile 'javax.mail:javax.mail-api:1.5.5' + compile 'commons-cli:commons-cli:1.4' + compile 'javax.mail:javax.mail-api:1.6.0' compile 'javax.resource:javax.resource-api:1.7' compile 'javax.transaction:javax.transaction-api:1.2' compile 'mx4j:mx4j:3.0.2' compile 'mx4j:mx4j-remote:3.0.2' compile 'mx4j:mx4j-tools:3.0.1' - provided 'net.sourceforge.jline:jline:1.0.S2-B' - provided "org.eclipse.jetty:jetty-http:${jettyVersion}" - provided "org.eclipse.jetty:jetty-io:${jettyVersion}" - provided "org.eclipse.jetty:jetty-security:${jettyVersion}" - provided "org.eclipse.jetty:jetty-server:${jettyVersion}" - provided "org.eclipse.jetty:jetty-servlet:${jettyVersion}" - provided "org.eclipse.jetty:jetty-util:${jettyVersion}" - provided "org.eclipse.jetty:jetty-webapp:${jettyVersion}" - provided "org.eclipse.jetty:jetty-xml:${jettyVersion}" - compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' - provided "org.springframework:spring-aop:${springVersion}" - provided "org.springframework:spring-beans:${springVersion}" - provided "org.springframework:spring-context:${springVersion}" - provided "org.springframework:spring-context-support:${springVersion}" - provided "org.springframework:spring-core:${springVersion}" - provided "org.springframework:spring-expression:${springVersion}" - provided "org.springframework:spring-web:${springVersion}" - provided "org.springframework:spring-webmvc:${springVersion}" - provided "org.springframework:spring-tx:${springVersion}" - provided 'org.springframework.shell:spring-shell:1.0.0.RELEASE' - compile 'org.xerial.snappy:snappy-java:1.1.2.6' - - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" - provided "com.esotericsoftware:kryo-shaded:${kryoVersion}" - provided "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" - - compile group: 'org.apache.hbase', name: 'hbase-protocol', 
version: hbaseVersion - compile(group: 'org.apache.hbase', name: 'hbase-common', version: hbaseVersion) { + compileOnly 'net.sourceforge.jline:jline:1.0.S2-B' + compileOnly "org.eclipse.jetty:jetty-http:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-io:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-security:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-servlet:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-webapp:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-xml:${jettyVersion}" + compileOnly "org.springframework:spring-aop:${springVersion}" + compileOnly "org.springframework:spring-beans:${springVersion}" + compileOnly "org.springframework:spring-context:${springVersion}" + compileOnly "org.springframework:spring-context-support:${springVersion}" + compileOnly "org.springframework:spring-core:${springVersion}" + compileOnly "org.springframework:spring-expression:${springVersion}" + compileOnly "org.springframework:spring-web:${springVersion}" + compileOnly "org.springframework:spring-webmvc:${springVersion}" + compileOnly "org.springframework:spring-tx:${springVersion}" + compileOnly 'org.springframework.shell:spring-shell:1.0.0.RELEASE' + compile 'org.xerial.snappy:snappy-java:1.1.4' + + compileOnly "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compileOnly "com.google.protobuf:protobuf-java:${protobufVersion}" + compileOnly "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compileOnly "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "com.google.code.findbugs:jsr305:${jsr305Version}" + compileOnly "com.esotericsoftware:kryo-shaded:${kryoVersion}" + compileOnly "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" + + compile "io.netty:netty-all:${nettyAllVersion}" + compile 'javax.ws.rs:javax.ws.rs-api:2.0.1' + compile 'org.apache.htrace:htrace-core:3.2.0-incubating' + compile "org.codehaus.jackson:jackson-core-asl:${jackson1Version}" + compile "org.codehaus.jackson:jackson-mapper-asl:${jackson1Version}" + + compile 'org.apache.commons:commons-pool2:2.4.2' + compile "com.google.guava:guava:${guavaVersion}" + compile 'xml-apis:xml-apis:1.0.b2' + compile('com.io7m.xom:xom:1.2.10') { + exclude(group: 'xalan', module: 'xalan') + exclude(group: 'xerces', module: 'xercesImpl') + exclude(group: 'xml-apis', module: 'xml-apis') + } + + compileOnly group: 'org.apache.hbase', name: 'hbase-protocol', version: hbaseVersion + compileOnly(group: 'org.apache.hbase', name: 'hbase-common', version: hbaseVersion) { exclude(group: 'org.apache.hbase', module: 'hbase-annotations') } - compile(group: 'org.apache.hbase', name: 'hbase-client', version: hbaseVersion) { + compileOnly(group: 'org.apache.hbase', name: 'hbase-client', version: hbaseVersion) { exclude(group: 'org.apache.hbase', module: 'hbase-annotations') exclude(group: 'io.netty', module: 'netty') } - compile(group: 'org.apache.hbase', name: 'hbase-server', version: hbaseVersion) { + compileOnly(group: 
'org.apache.hbase', name: 'hbase-server', version: hbaseVersion) { exclude(group: 'org.apache.hbase', module: 'hbase-annotations') exclude(group: 'org.apache.hadoop', module: 'hadoop-core') exclude(group: 'org.apache.hadoop', module: 'hadoop-client') @@ -107,26 +119,11 @@ dependencies { exclude(group: 'com.sun.jersey', module: 'jersey-json') exclude(group: 'commons-io', module: 'commons-io') } - compile "io.netty:netty-all:${nettyAllVersion}" - compile 'javax.ws.rs:javax.ws.rs-api:2.0.1' - compile 'org.cloudera.htrace:htrace-core:2.05' - compile 'org.apache.htrace:htrace-core:3.2.0-incubating' - compile 'org.codehaus.jackson:jackson-mapper-asl:1.9.13' - - compile 'org.apache.commons:commons-pool2:2.4.2' - compile "com.google.guava:guava:${guavaVersion}" - compile 'xml-apis:xml-apis:2.0.2' - compile('com.io7m.xom:xom:1.2.10') { - exclude(group: 'xalan', module: 'xalan') - exclude(group: 'xerces', module: 'xercesImpl') - exclude(group: 'xml-apis', module: 'xml-apis') - } - provided 'com.jcraft:jsch:0.1.53' - provided "org.apache.ant:ant:${antVersion}" + compileOnly 'com.jcraft:jsch:0.1.53' + compileOnly "org.apache.ant:ant:${antVersion}" compile project(subprojectBase + 'gemfire-jgroups') - compile project(subprojectBase + 'gemfire-joptsimple') - compile project(subprojectBase + 'gemfire-json') + compile project(subprojectBase + 'gemfire-util') compile project(subprojectBase + 'gemfire-shared') jcaCompile sourceSets.main.output diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionConfig.java b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionConfig.java index 117e6668f..367bdab59 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionConfig.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/distributed/internal/DistributionConfig.java @@ -1991,7 +1991,7 @@ public interface DistributionConfig extends Config, ManagerLogWriter.LogConfig { public static final String GEMFIRE_PREFIX = "gemfire."; /** The prefix used for SnappyData properties set through java system properties */ - public static final String SNAPPY_PREFIX = "snappydata.store."; + public static final String SNAPPY_PREFIX = SystemProperties.SNAPPY_PREFIX; /** For the "custom-" prefixed properties */ public static final String USERDEFINED_PREFIX_NAME = "custom-"; diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/NanoTimer.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/NanoTimer.java index 4f48a8c3e..977116002 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/NanoTimer.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/NanoTimer.java @@ -72,18 +72,12 @@ public final class NanoTimer { */ try { isNativeTimer = GemFireCacheImpl.gfxdSystem() - && NativeCalls.getInstance().loadNativeLibrary() ? SharedLibrary - .register("gemfirexd") : false; + && NativeCalls.getInstance().loadNativeLibrary() + && SharedLibrary.register("gemfirexd"); // test method call. can throw UnsatisfiedLinkError if unsuccessful. 
_nanoTime(NativeCalls.CLOCKID_REALTIME); - } catch (Exception e) { - isNativeTimer = false; - if (SharedLibrary.debug) { - SharedLibrary.logInitMessage(LogWriterImpl.WARNING_LEVEL, - "_nanoTime couldn't be invoked successfully.", e); - } - } catch (UnsatisfiedLinkError e) { + } catch (Exception | UnsatisfiedLinkError e) { isNativeTimer = false; if (SharedLibrary.debug) { SharedLibrary.logInitMessage(LogWriterImpl.WARNING_LEVEL, diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java index b30df7250..cffa93b5a 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/CachePerfStats.java @@ -1468,4 +1468,12 @@ public long getEvaluations() { public long getEvaluationTime() { return stats.getLong(evictByCriteria_evaluationTimeId); } + + /** only used by tests */ + public void clearEvictionByCriteriaStatsForTest() { + stats.setLong(evictByCriteria_evictionsId, 0L); + stats.setLong(evictByCriteria_evictionsInProgressId, 0L); + stats.setLong(evictByCriteria_evaluationsId, 0L); + stats.setLong(evictByCriteria_evaluationTimeId, 0L); + } } diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java index 8ad07016d..b699c40db 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/GemFireCacheImpl.java @@ -5619,7 +5619,7 @@ public static boolean gfxdSystem() { public static void setGFXDSystem(final boolean v) { // check the stack to see if this is really from a GemFireXD system - gfxdSystem = v ? SystemProperties.isUsingGemFireXDEntryPoint() : false; + gfxdSystem = v && SystemProperties.isUsingGemFireXDEntryPoint(); } /** @@ -6100,6 +6100,7 @@ public static class FactoryStatics { public static synchronized void init() { // set custom entry factories for GemFireXD if (gfxdSystem || SystemProperties.isUsingGemFireXDEntryPoint()) { + gfxdSystem = true; String provider = SystemProperties.GFXD_FACTORY_PROVIDER; try { Class factoryProvider = ClassPathLoader.getLatest().forName( diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java index 8c6a9b471..170975128 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/PRHARedundancyProvider.java @@ -62,8 +62,8 @@ import com.gemstone.gemfire.internal.concurrent.AL; import com.gemstone.gemfire.internal.concurrent.CFactory; import com.gemstone.gemfire.internal.i18n.LocalizedStrings; +import com.gemstone.gemfire.internal.shared.OpenHashSet; import com.gemstone.org.jgroups.util.StringId; -import it.unimi.dsi.fastutil.objects.ObjectOpenHashSet; /** * This class provides the redundancy management for partitioned region. 
It will @@ -550,7 +550,7 @@ public InternalDistributedMember createBucketOnDataStore(int bucketId, this.prRegion.bucketStringForLogs(bucketId)); } Collection acceptedMembers = new ArrayList(); // ArrayList - ObjectOpenHashSet excludedMembers = new ObjectOpenHashSet<>(); + OpenHashSet excludedMembers = new OpenHashSet<>(); ArrayListWithClearState failedMembers = new ArrayListWithClearState(); final long timeOut = System.currentTimeMillis() + computeTimeout(); BucketMembershipObserver observer = null; @@ -1396,7 +1396,7 @@ private InternalDistributedMember getPreferredDataStore( // Convert peers to DataStoreBuckets ArrayList stores = this.prRegion.getRegionAdvisor() - .adviseFilteredDataStores(new ObjectOpenHashSet<>(candidates)); + .adviseFilteredDataStores(new OpenHashSet<>(candidates)); final DM dm = this.prRegion.getDistributionManager(); // Add ourself as a candidate, if appropriate diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java index bdac4512b..25446f0e8 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/RegionEntry.java @@ -428,7 +428,8 @@ public boolean destroy(LocalRegion region, /** * Get the value in region or disk without faultin in raw form without any - * change in storage format (SnappyData off-heap will return off-heap buffer). + * change in storage format (SnappyData off-heap will return off-heap + * buffer with a retain so caller should do a release once done). */ @Retained public Object getValueInVMOrDiskWithoutFaultIn(LocalRegion owner); diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/lru/NewLRUClockHand.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/lru/NewLRUClockHand.java index d3bdfb019..c4215777f 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/lru/NewLRUClockHand.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/lru/NewLRUClockHand.java @@ -31,7 +31,6 @@ import com.gemstone.gemfire.internal.cache.PlaceHolderDiskRegion; import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector; import com.gemstone.gemfire.internal.shared.unsafe.UnsafeHolder; -import com.gemstone.gemfire.internal.snappy.CallbackFactoryProvider; /** @@ -62,9 +61,6 @@ public class NewLRUClockHand { static private final int maxEntries; -private boolean snappyStore = - CallbackFactoryProvider.getStoreCallbacks().isSnappyStore(); - static { String squelch = System.getProperty("gemfire.lru.maxSearchEntries"); if (squelch == null) @@ -263,18 +259,6 @@ public LRUClockNode getLRUEntry(boolean skipLockedEntries) { continue; } - // Checking whether this entry is outside lock , - // so that we won;t attempt to evict an entry whose - // faultIn is in process - // TODO Remove SnappyStore check after 0.9 . 
Added this check to - // reduce regression cycles - if (snappyStore && (aNode.isValueNull()|| aNode.testEvicted())) { - if (debug) { - logWriter - .info(LocalizedStrings.NewLRUClockHand_DISCARDING_EVICTED_ENTRY); - } - continue; - } boolean success = false; // if required skip a locked entry and keep the lock (caller should release) if (skipLockedEntries) { diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/RegionAdvisor.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/RegionAdvisor.java index a37cdc6dd..b6fdc520b 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/RegionAdvisor.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/cache/partitioned/RegionAdvisor.java @@ -70,7 +70,7 @@ import com.gemstone.gemfire.internal.concurrent.S; import com.gemstone.gemfire.internal.i18n.LocalizedStrings; import com.gemstone.gnu.trove.THashSet; -import it.unimi.dsi.fastutil.objects.Object2IntOpenHashMap; +import io.snappydata.collection.ObjectLongHashMap; public final class RegionAdvisor extends CacheDistributionAdvisor { @@ -1555,13 +1555,13 @@ public Object next() { */ public ArrayList adviseFilteredDataStores(final Set memberFilter) { - final Object2IntOpenHashMap memberToPrimaryCount = - new Object2IntOpenHashMap<>(); + final ObjectLongHashMap memberToPrimaryCount = + ObjectLongHashMap.withExpectedSize(16); for (ProxyBucketRegion pbr : this.buckets) { // quick dirty check InternalDistributedMember p=pbr.getBucketAdvisor().basicGetPrimaryMember(); if (p!=null) { - memberToPrimaryCount.addTo(p, 1); + memberToPrimaryCount.put(p, memberToPrimaryCount.getLong(p) + 1); } } @@ -1571,7 +1571,7 @@ public boolean include(Profile profile) { if (profile instanceof PartitionProfile) { PartitionProfile p = (PartitionProfile)profile; if(memberFilter.contains(p.getDistributedMember())) { - int primaryCount = memberToPrimaryCount.getInt(p.getDistributedMember()); + int primaryCount = (int)memberToPrimaryCount.getLong(p.getDistributedMember()); ds.add(new DataStoreBuckets(p.getDistributedMember(), p.numBuckets, primaryCount, p.localMaxMemory)); } } diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/snappy/StoreCallbacks.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/snappy/StoreCallbacks.java index dacbc63bb..eaa5931ec 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/snappy/StoreCallbacks.java +++ b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/snappy/StoreCallbacks.java @@ -22,17 +22,12 @@ import com.gemstone.gemfire.internal.cache.BucketRegion; import com.gemstone.gemfire.internal.cache.EntryEventImpl; +import com.gemstone.gemfire.internal.shared.SystemProperties; import com.gemstone.gemfire.internal.snappy.memory.MemoryManagerStats; public interface StoreCallbacks { - String SHADOW_SCHEMA_NAME = "SNAPPYSYS_INTERNAL"; - - String SHADOW_TABLE_SUFFIX = "_COLUMN_STORE_"; - - String SHADOW_SCHEMA_SEPARATOR = "____"; - - String SHADOW_SCHEMA_NAME_WITH_SEPARATOR = SHADOW_SCHEMA_NAME + SHADOW_SCHEMA_SEPARATOR; + String SHADOW_TABLE_SUFFIX = SystemProperties.SHADOW_TABLE_SUFFIX; void registerTypes(); diff --git a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/ConnectionTable.java b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/ConnectionTable.java index 925ff80bf..3d65f9658 100644 --- a/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/ConnectionTable.java +++ 
b/gemfire-core/src/main/java/com/gemstone/gemfire/internal/tcp/ConnectionTable.java @@ -393,7 +393,7 @@ public void destroyObject(ConnKey key, @Override public boolean validateObject(ConnKey key, PooledObject p) { - return p.getObject().connected; + return !p.getObject().isClosing(); } @Override diff --git a/gemfire-examples/build.gradle b/gemfire-examples/build.gradle index af8dd3293..3e3e4bc54 100644 --- a/gemfire-examples/build.gradle +++ b/gemfire-examples/build.gradle @@ -16,7 +16,7 @@ */ dependencies { - provided project(subprojectBase + 'gemfire-core') + compileOnly project(subprojectBase + 'gemfire-core') compile "org.osgi:org.osgi.core:${osgiVersion}" } diff --git a/gemfire-junit/build.gradle b/gemfire-junit/build.gradle index 5be722a36..2fb52bea3 100644 --- a/gemfire-junit/build.gradle +++ b/gemfire-junit/build.gradle @@ -27,45 +27,54 @@ dependencies { compile 'org.jmock:jmock-legacy:2.5.1' compile 'edu.umd.cs.mtc:multithreadedtc:1.01' compile 'cglib:cglib-nodep:2.1_3' - compile 'org.objenesis:objenesis:1.0' + compile 'org.objenesis:objenesis:2.6' compile 'antlr:antlr:2.7.7' - provided 'net.sourceforge.jline:jline:1.0.S2-B' - provided "org.eclipse.jetty:jetty-http:${jettyVersion}" - provided "org.eclipse.jetty:jetty-io:${jettyVersion}" - provided "org.eclipse.jetty:jetty-security:${jettyVersion}" - provided "org.eclipse.jetty:jetty-server:${jettyVersion}" - provided "org.eclipse.jetty:jetty-servlet:${jettyVersion}" - provided "org.eclipse.jetty:jetty-util:${jettyVersion}" - provided "org.eclipse.jetty:jetty-webapp:${jettyVersion}" - provided "org.eclipse.jetty:jetty-xml:${jettyVersion}" + compile 'net.sourceforge.jline:jline:1.0.S2-B' + compile "org.eclipse.jetty:jetty-http:${jettyVersion}" + compile "org.eclipse.jetty:jetty-io:${jettyVersion}" + compile "org.eclipse.jetty:jetty-security:${jettyVersion}" + compile "org.eclipse.jetty:jetty-server:${jettyVersion}" + compile "org.eclipse.jetty:jetty-servlet:${jettyVersion}" + compile "org.eclipse.jetty:jetty-util:${jettyVersion}" + compile "org.eclipse.jetty:jetty-webapp:${jettyVersion}" + compile "org.eclipse.jetty:jetty-xml:${jettyVersion}" - provided "org.springframework:spring-aop:${springVersion}" - provided "org.springframework:spring-beans:${springVersion}" - provided "org.springframework:spring-context:${springVersion}" - provided "org.springframework:spring-context-support:${springVersion}" - provided "org.springframework:spring-core:${springVersion}" - provided "org.springframework:spring-expression:${springVersion}" - provided "org.springframework:spring-web:${springVersion}" - provided "org.springframework:spring-webmvc:${springVersion}" - provided "org.springframework:spring-tx:${springVersion}" - provided 'org.springframework.shell:spring-shell:1.0.0.RELEASE' + compile "org.springframework:spring-aop:${springVersion}" + compile "org.springframework:spring-beans:${springVersion}" + compile "org.springframework:spring-context:${springVersion}" + compile "org.springframework:spring-context-support:${springVersion}" + compile "org.springframework:spring-core:${springVersion}" + compile "org.springframework:spring-expression:${springVersion}" + compile "org.springframework:spring-web:${springVersion}" + compile "org.springframework:spring-webmvc:${springVersion}" + compile "org.springframework:spring-tx:${springVersion}" + compile 'org.springframework.shell:spring-shell:1.0.0.RELEASE' - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided 
"org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" + compile "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compile "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compile "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compile "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compile "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compile "com.google.protobuf:protobuf-java:${protobufVersion}" + compile "org.apache.hbase:hbase-common:${hbaseVersion}" + compile "org.apache.hbase:hbase-protocol:${hbaseVersion}" + compile "org.apache.hbase:hbase-client:${hbaseVersion}" + compile "org.apache.hbase:hbase-server:${hbaseVersion}" + compile "com.sun.jersey:jersey-core:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-server:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" + compile "org.mortbay.jetty:jetty:${hadoopJettyVersion}" + compile "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" + compile 'org.cloudera.htrace:htrace-core:2.05' + compile "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compile "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compile "org.eclipse.jetty:jetty-server:${jettyVersion}" + compile "org.eclipse.jetty:jetty-util:${jettyVersion}" + compile "com.google.code.findbugs:jsr305:${jsr305Version}" - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}:tests" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}:tests" + compile "org.apache.hadoop:hadoop-common:${hadoopVersion}:tests" + compile "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}:tests" // use dunit support from snappydata if (subprojectBase == ':') { diff --git a/gemfire-shared/build.gradle b/gemfire-shared/build.gradle index edc0cf270..2725e8e67 100644 --- a/gemfire-shared/build.gradle +++ b/gemfire-shared/build.gradle @@ -16,8 +16,8 @@ */ dependencies { - compile 'net.java.dev.jna:jna:4.2.2' + compile 'net.java.dev.jna:jna:4.5.0' compile 'commons-io:commons-io:2.5' compile project(subprojectBase + 'gemfire-trove') - provided "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" + compileOnly "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" } diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/ClientSharedUtils.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/ClientSharedUtils.java index 79333a1eb..558e93750 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/ClientSharedUtils.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/ClientSharedUtils.java @@ -1336,7 +1336,7 @@ public static RuntimeException newRuntimeException(String message, } // Convert log4j.Level to java.util.logging.Level - public static Level converToJavaLogLevel(org.apache.log4j.Level log4jLevel) { + public static Level convertToJavaLogLevel(org.apache.log4j.Level log4jLevel) { Level javaLevel = Level.INFO; 
if (log4jLevel != null) { if (log4jLevel == org.apache.log4j.Level.ERROR) { @@ -1356,6 +1356,26 @@ public static Level converToJavaLogLevel(org.apache.log4j.Level log4jLevel) { return javaLevel; } + public static String convertToLog4LogLevel(Level level) { + String levelStr = "INFO"; + // convert to log4j level + if (level == Level.SEVERE) { + levelStr = "ERROR"; + } else if (level == Level.WARNING) { + levelStr = "WARN"; + } else if (level == Level.INFO || level == Level.CONFIG) { + levelStr = "INFO"; + } else if (level == Level.FINE || level == Level.FINER || + level == Level.FINEST) { + levelStr = "TRACE"; + } else if (level == Level.ALL) { + levelStr = "DEBUG"; + } else if (level == Level.OFF) { + levelStr = "OFF"; + } + return levelStr; + } + public static void initLog4J(String logFile, Level level) throws IOException { initLog4J(logFile, null, level); @@ -1375,22 +1395,7 @@ public static void initLog4J(String logFile, Properties userProps, // override file location and level if (level != null) { - String levelStr = "INFO"; - // convert to log4j level - if (level == Level.SEVERE) { - levelStr = "ERROR"; - } else if (level == Level.WARNING) { - levelStr = "WARN"; - } else if (level == Level.INFO || level == Level.CONFIG) { - levelStr = "INFO"; - } else if (level == Level.FINE || level == Level.FINER || - level == Level.FINEST) { - levelStr = "TRACE"; - } else if (level == Level.ALL) { - levelStr = "DEBUG"; - } else if (level == Level.OFF) { - levelStr = "OFF"; - } + final String levelStr = convertToLog4LogLevel(level); if (logFile != null) { props.setProperty("log4j.rootCategory", levelStr + ", file"); } else { diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/OpenHashSet.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/OpenHashSet.java index 342a2ce51..2e7d7e292 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/OpenHashSet.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/OpenHashSet.java @@ -17,6 +17,7 @@ package com.gemstone.gemfire.internal.shared; import java.util.AbstractSet; +import java.util.Collection; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Set; @@ -30,8 +31,11 @@ /** * An optimized HashSet using open addressing with quadratic probing. * In micro-benchmarks this is faster in both inserts, deletes and gets, - * as well as mixed workloads than all other HashSet implementations + * as well as mixed workloads than most other HashSet implementations * generally available in java (JDK HashSet, fastutil HashSets, or Trove's). + *

+ * It adds additional APIs like {@link #create}, {@link #getKey}, + * {@link #addKey} which is the main reason for having this class. */ public class OpenHashSet extends AbstractSet implements Set, Cloneable, java.io.Serializable { @@ -78,6 +82,11 @@ public OpenHashSet(int initialCapacity, float loadFactor, ? hashingStrategy : THashParameters.DEFAULT_HASHING; } + public OpenHashSet(Collection c) { + this(c.size()); + addAll(c); + } + public static int keyHash(Object k, TObjectHashingStrategy hashingStrategy) { return ClientResolverUtils.fastHashInt(hashingStrategy.computeHashCode(k)); } diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/SystemProperties.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/SystemProperties.java index f6a96f202..662a02ccc 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/SystemProperties.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/SystemProperties.java @@ -72,6 +72,20 @@ public final class SystemProperties { public static final String DEFAULT_GFXDCLIENT_PROPERTY_NAME_PREFIX = "gemfirexd.client."; + /** The prefix used for SnappyData properties set through java system properties */ + public static final String SNAPPY_PREFIX = "snappydata.store."; + + public static final String SHADOW_SCHEMA_NAME = "SNAPPYSYS_INTERNAL"; + + public static final String SHADOW_TABLE_SUFFIX = "_COLUMN_STORE_"; + + public static final String SHADOW_SCHEMA_SEPARATOR = "____"; + + public static final String SHADOW_SCHEMA_NAME_WITH_SEPARATOR = + SHADOW_SCHEMA_NAME + SHADOW_SCHEMA_SEPARATOR; + + public static final String SNAPPY_HIVE_METASTORE = "SNAPPY_HIVE_METASTORE"; + public static final String GFXD_FACTORY_PROVIDER = "com.pivotal.gemfirexd." 
+ "internal.engine.store.entry.GfxdObjectFactoriesProvider"; @@ -174,8 +188,11 @@ public static boolean isUsingGemFireXDEntryPoint() { .equals(frameCls) || "com.pivotal.gemfirexd.internal.GemFireXDVersion" .equals(frameCls) || - "io.snappydata.gemxd.SnappyDataVersion$" - .equals(frameCls)) { + "io.snappydata.gemxd.SnappyDataVersion$".equals(frameCls) || + "io.snappydata.gemxd.SnappyDataVersion".equals(frameCls) || + "org.apache.spark.sql.SnappySession$".equals(frameCls) || + "org.apache.spark.sql.SnappySession".equals(frameCls) || + "io.snappydata.gemxd.ClusterCallbacksImpl$".equals(frameCls)) { return true; } } diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/LinuxNativeCalls.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/LinuxNativeCalls.java index cff1961fb..63987b79d 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/LinuxNativeCalls.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/LinuxNativeCalls.java @@ -158,7 +158,7 @@ public static native int clock_gettime(int clkId, TimeSpec time) throws LastErrorException; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("tv_sec", "tv_nsec"); } } @@ -195,7 +195,7 @@ public static native int clock_gettime(int clkId, TimeSpec64 time) throws LastErrorException; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("tv_sec", "tv_nsec"); } } @@ -274,7 +274,7 @@ public static class FSIDIntArr2 extends Structure { @SuppressWarnings("unused") public int[] fsid = new int[2]; - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("fsid"); } } @@ -284,7 +284,7 @@ public static class FSPAREIntArr5 extends Structure { @SuppressWarnings("unused") public int[] fspare = new int[5]; - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("fspare"); } } @@ -326,7 +326,7 @@ public static native int statfs(String path, StatFS statfs) throws LastErrorException; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("f_type", "f_bsize", "f_blocks", "f_bfree", "f_bavail", "f_files", "f_ffree", "f_fsid", "f_namelen", "f_frsize", "f_spare"); @@ -365,7 +365,7 @@ public static class FSPARELongArr5 extends Structure { @SuppressWarnings("unused") public long[] fspare = new long[5]; - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("fspare"); } } @@ -424,7 +424,7 @@ public static native int statfs(String path, StatFS64 statfs) throws LastErrorException; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("f_type", "f_bsize", "f_blocks", "f_bfree", "f_bavail", "f_files", "f_ffree", "f_fsid", "f_namelen", "f_frsize", "f_spare"); diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/POSIXNativeCalls.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/POSIXNativeCalls.java index d0142f2df..6c72db0f7 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/POSIXNativeCalls.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/POSIXNativeCalls.java @@ -133,7 +133,7 @@ public static class RLimit extends Structure { public long rlim_max; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("rlim_cur", 
"rlim_max"); } } diff --git a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/WinNativeCalls.java b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/WinNativeCalls.java index 3bdd3b53d..f9459e945 100644 --- a/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/WinNativeCalls.java +++ b/gemfire-shared/src/main/java/com/gemstone/gemfire/internal/shared/jna/WinNativeCalls.java @@ -72,7 +72,7 @@ public static final class TcpKeepAlive extends Structure { public int keepaliveinterval; @Override - protected List getFieldOrder() { + protected List getFieldOrder() { return Arrays.asList("enabled", "keepalivetime", "keepaliveinterval"); } diff --git a/gemfire-util/build.gradle b/gemfire-util/build.gradle new file mode 100644 index 000000000..22c9cb7bc --- /dev/null +++ b/gemfire-util/build.gradle @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017 SnappyData, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +plugins { + id "net.ltgt.apt" version "0.12" +} + +apply plugin: 'net.ltgt.apt-eclipse' +apply plugin: 'net.ltgt.apt-idea' + +dependencies { + compile "com.koloboke:koloboke-api-jdk8:${kolobokeVersion}" + compile "com.koloboke:koloboke-impl-common-jdk8:${kolobokeVersion}" + // for classes using koloboke annotations + compileOnly "com.koloboke:koloboke-compile:${kolobokeCompileVersion}" +} diff --git a/gemfire-util/src/main/java/io/snappydata/collection/IntObjectHashMap.java b/gemfire-util/src/main/java/io/snappydata/collection/IntObjectHashMap.java new file mode 100644 index 000000000..fc31e88de --- /dev/null +++ b/gemfire-util/src/main/java/io/snappydata/collection/IntObjectHashMap.java @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2017 SnappyData, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. 
+ */ + +package io.snappydata.collection; + +import java.util.Collection; + +import com.koloboke.compile.KolobokeMap; + +@KolobokeMap +public abstract class IntObjectHashMap { + + public static IntObjectHashMap withExpectedSize(int expectedSize) { + return new KolobokeIntObjectHashMap<>(expectedSize); + } + + public abstract V put(int key, V value); + + public abstract V get(int key); + + public abstract Collection values(); + + public abstract int size(); + + public abstract void clear(); +} diff --git a/gemfire-util/src/main/java/io/snappydata/collection/ObjectLongHashMap.java b/gemfire-util/src/main/java/io/snappydata/collection/ObjectLongHashMap.java new file mode 100644 index 000000000..a7008a765 --- /dev/null +++ b/gemfire-util/src/main/java/io/snappydata/collection/ObjectLongHashMap.java @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017 SnappyData, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package io.snappydata.collection; + +import java.util.Map; +import java.util.Set; + +import com.koloboke.compile.KolobokeMap; + +@KolobokeMap +public abstract class ObjectLongHashMap { + + public static ObjectLongHashMap withExpectedSize(int expectedSize) { + return new KolobokeObjectLongHashMap<>(expectedSize); + } + + public abstract long put(K key, long value); + + public abstract long getLong(K key); + + public abstract boolean containsKey(K key); + + public abstract long removeAsLong(K key); + + public abstract Set keySet(); + + public abstract Set> entrySet(); + + public abstract int size(); + + public abstract void clear(); + + public final long addTo(K key, long delta) { + return put(key, getLong(key) + delta); + } +} diff --git a/gemfire-joptsimple/src/main/java/joptsimple/AbstractOptionSpec.java b/gemfire-util/src/main/java/joptsimple/AbstractOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/AbstractOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/AbstractOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/AlternativeLongOptionSpec.java b/gemfire-util/src/main/java/joptsimple/AlternativeLongOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/AlternativeLongOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/AlternativeLongOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/ArgumentAcceptingOptionSpec.java b/gemfire-util/src/main/java/joptsimple/ArgumentAcceptingOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/ArgumentAcceptingOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/ArgumentAcceptingOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/ArgumentList.java b/gemfire-util/src/main/java/joptsimple/ArgumentList.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/ArgumentList.java rename to gemfire-util/src/main/java/joptsimple/ArgumentList.java diff 
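The IntObjectHashMap and ObjectLongHashMap abstractions above are turned into concrete classes by the Koloboke compile annotation processor (wired through the net.ltgt.apt plugin and the koloboke-compile compileOnly dependency in gemfire-util/build.gradle). A minimal usage sketch, assuming the generated KolobokeIntObjectHashMap and KolobokeObjectLongHashMap classes exist after annotation processing; the keys and values here are illustrative only and use just the abstract methods declared above:

import io.snappydata.collection.IntObjectHashMap;
import io.snappydata.collection.ObjectLongHashMap;

public class KolobokeMapsSketch {

  public static void main(String[] args) {
    // int -> V map backed by the Koloboke-generated implementation
    IntObjectHashMap<String> byBucket = IntObjectHashMap.withExpectedSize(8);
    byBucket.put(1, "row buffer");
    byBucket.put(2, "column batch");
    System.out.println(byBucket.get(2) + ", size=" + byBucket.size());

    // K -> long map; addTo() is the convenience wrapper declared above
    ObjectLongHashMap<String> counters = ObjectLongHashMap.withExpectedSize(8);
    counters.put("reads", 10L);
    counters.addTo("reads", 5L);
    System.out.println(counters.getLong("reads"));      // 15
    counters.removeAsLong("reads");
    System.out.println(counters.containsKey("reads"));  // false
  }
}

The factories expose only an expected size; giving a sizing hint up front avoids rehashing during bulk loads, which is the usual reason such primitive-specialized maps replace java.util.HashMap in hot paths.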
--git a/gemfire-joptsimple/src/main/java/joptsimple/BuiltinHelpFormatter.java b/gemfire-util/src/main/java/joptsimple/BuiltinHelpFormatter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/BuiltinHelpFormatter.java rename to gemfire-util/src/main/java/joptsimple/BuiltinHelpFormatter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/HelpFormatter.java b/gemfire-util/src/main/java/joptsimple/HelpFormatter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/HelpFormatter.java rename to gemfire-util/src/main/java/joptsimple/HelpFormatter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/IllegalOptionSpecificationException.java b/gemfire-util/src/main/java/joptsimple/IllegalOptionSpecificationException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/IllegalOptionSpecificationException.java rename to gemfire-util/src/main/java/joptsimple/IllegalOptionSpecificationException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/MissingRequiredOptionException.java b/gemfire-util/src/main/java/joptsimple/MissingRequiredOptionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/MissingRequiredOptionException.java rename to gemfire-util/src/main/java/joptsimple/MissingRequiredOptionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/MultipleArgumentsForOptionException.java b/gemfire-util/src/main/java/joptsimple/MultipleArgumentsForOptionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/MultipleArgumentsForOptionException.java rename to gemfire-util/src/main/java/joptsimple/MultipleArgumentsForOptionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/NoArgumentOptionSpec.java b/gemfire-util/src/main/java/joptsimple/NoArgumentOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/NoArgumentOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/NoArgumentOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionArgumentConversionException.java b/gemfire-util/src/main/java/joptsimple/OptionArgumentConversionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionArgumentConversionException.java rename to gemfire-util/src/main/java/joptsimple/OptionArgumentConversionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionDescriptor.java b/gemfire-util/src/main/java/joptsimple/OptionDescriptor.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionDescriptor.java rename to gemfire-util/src/main/java/joptsimple/OptionDescriptor.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionException.java b/gemfire-util/src/main/java/joptsimple/OptionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionException.java rename to gemfire-util/src/main/java/joptsimple/OptionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionMissingRequiredArgumentException.java b/gemfire-util/src/main/java/joptsimple/OptionMissingRequiredArgumentException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionMissingRequiredArgumentException.java rename to gemfire-util/src/main/java/joptsimple/OptionMissingRequiredArgumentException.java diff --git 
a/gemfire-joptsimple/src/main/java/joptsimple/OptionParser.java b/gemfire-util/src/main/java/joptsimple/OptionParser.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionParser.java rename to gemfire-util/src/main/java/joptsimple/OptionParser.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionParserState.java b/gemfire-util/src/main/java/joptsimple/OptionParserState.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionParserState.java rename to gemfire-util/src/main/java/joptsimple/OptionParserState.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionSet.java b/gemfire-util/src/main/java/joptsimple/OptionSet.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionSet.java rename to gemfire-util/src/main/java/joptsimple/OptionSet.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionSpec.java b/gemfire-util/src/main/java/joptsimple/OptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionSpec.java rename to gemfire-util/src/main/java/joptsimple/OptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionSpecBuilder.java b/gemfire-util/src/main/java/joptsimple/OptionSpecBuilder.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionSpecBuilder.java rename to gemfire-util/src/main/java/joptsimple/OptionSpecBuilder.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionSpecTokenizer.java b/gemfire-util/src/main/java/joptsimple/OptionSpecTokenizer.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionSpecTokenizer.java rename to gemfire-util/src/main/java/joptsimple/OptionSpecTokenizer.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/OptionalArgumentOptionSpec.java b/gemfire-util/src/main/java/joptsimple/OptionalArgumentOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/OptionalArgumentOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/OptionalArgumentOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/ParserRules.java b/gemfire-util/src/main/java/joptsimple/ParserRules.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/ParserRules.java rename to gemfire-util/src/main/java/joptsimple/ParserRules.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/RequiredArgumentOptionSpec.java b/gemfire-util/src/main/java/joptsimple/RequiredArgumentOptionSpec.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/RequiredArgumentOptionSpec.java rename to gemfire-util/src/main/java/joptsimple/RequiredArgumentOptionSpec.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/UnrecognizedOptionException.java b/gemfire-util/src/main/java/joptsimple/UnrecognizedOptionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/UnrecognizedOptionException.java rename to gemfire-util/src/main/java/joptsimple/UnrecognizedOptionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/ValueConversionException.java b/gemfire-util/src/main/java/joptsimple/ValueConversionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/ValueConversionException.java rename to gemfire-util/src/main/java/joptsimple/ValueConversionException.java diff --git 
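The SHADOW_* constants added to SystemProperties earlier in this change are consumed later in the patch by GemFireContainer.getRowBufferTableName and by the column-table regex in ObjectSizer. A standalone sketch of that naming round trip follows; the table name APP.CUSTOMERS is an assumed example, not something taken from the patch:

import java.util.regex.Pattern;

public class ShadowTableNameSketch {

  // Values mirror the SHADOW_* constants added to SystemProperties.
  static final String SHADOW_SCHEMA_NAME = "SNAPPYSYS_INTERNAL";
  static final String SHADOW_SCHEMA_SEPARATOR = "____";
  static final String SHADOW_TABLE_SUFFIX = "_COLUMN_STORE_";
  static final String SHADOW_SCHEMA_NAME_WITH_SEPARATOR =
      SHADOW_SCHEMA_NAME + SHADOW_SCHEMA_SEPARATOR;

  // Same shape as the columnTableRegex built in ObjectSizer below.
  static final Pattern COLUMN_TABLE_REGEX = Pattern.compile(
      ".*" + SHADOW_SCHEMA_NAME + "(.*)" + SHADOW_TABLE_SUFFIX);

  // Same stripping logic as GemFireContainer.getRowBufferTableName below.
  static String rowBufferTableName(String columnBatchTableName) {
    String tableName = columnBatchTableName.replace(
        SHADOW_SCHEMA_NAME_WITH_SEPARATOR, "");
    return tableName.substring(0,
        tableName.length() - SHADOW_TABLE_SUFFIX.length());
  }

  public static void main(String[] args) {
    // Hypothetical column-batch table name for a row table APP.CUSTOMERS.
    String shadowTable = "APP." + SHADOW_SCHEMA_NAME_WITH_SEPARATOR +
        "CUSTOMERS" + SHADOW_TABLE_SUFFIX;
    System.out.println(COLUMN_TABLE_REGEX.matcher(shadowTable).matches()); // true
    System.out.println(rowBufferTableName(shadowTable));                   // APP.CUSTOMERS
  }
}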
a/gemfire-joptsimple/src/main/java/joptsimple/ValueConverter.java b/gemfire-util/src/main/java/joptsimple/ValueConverter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/ValueConverter.java rename to gemfire-util/src/main/java/joptsimple/ValueConverter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/AbbreviationMap.java b/gemfire-util/src/main/java/joptsimple/internal/AbbreviationMap.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/AbbreviationMap.java rename to gemfire-util/src/main/java/joptsimple/internal/AbbreviationMap.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/Classes.java b/gemfire-util/src/main/java/joptsimple/internal/Classes.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/Classes.java rename to gemfire-util/src/main/java/joptsimple/internal/Classes.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/Column.java b/gemfire-util/src/main/java/joptsimple/internal/Column.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/Column.java rename to gemfire-util/src/main/java/joptsimple/internal/Column.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/ColumnWidthCalculator.java b/gemfire-util/src/main/java/joptsimple/internal/ColumnWidthCalculator.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/ColumnWidthCalculator.java rename to gemfire-util/src/main/java/joptsimple/internal/ColumnWidthCalculator.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/ColumnarData.java b/gemfire-util/src/main/java/joptsimple/internal/ColumnarData.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/ColumnarData.java rename to gemfire-util/src/main/java/joptsimple/internal/ColumnarData.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/ConstructorInvokingValueConverter.java b/gemfire-util/src/main/java/joptsimple/internal/ConstructorInvokingValueConverter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/ConstructorInvokingValueConverter.java rename to gemfire-util/src/main/java/joptsimple/internal/ConstructorInvokingValueConverter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/MethodInvokingValueConverter.java b/gemfire-util/src/main/java/joptsimple/internal/MethodInvokingValueConverter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/MethodInvokingValueConverter.java rename to gemfire-util/src/main/java/joptsimple/internal/MethodInvokingValueConverter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/Objects.java b/gemfire-util/src/main/java/joptsimple/internal/Objects.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/Objects.java rename to gemfire-util/src/main/java/joptsimple/internal/Objects.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/Reflection.java b/gemfire-util/src/main/java/joptsimple/internal/Reflection.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/Reflection.java rename to gemfire-util/src/main/java/joptsimple/internal/Reflection.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/ReflectionException.java 
b/gemfire-util/src/main/java/joptsimple/internal/ReflectionException.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/ReflectionException.java rename to gemfire-util/src/main/java/joptsimple/internal/ReflectionException.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/internal/Strings.java b/gemfire-util/src/main/java/joptsimple/internal/Strings.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/internal/Strings.java rename to gemfire-util/src/main/java/joptsimple/internal/Strings.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/util/DateConverter.java b/gemfire-util/src/main/java/joptsimple/util/DateConverter.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/util/DateConverter.java rename to gemfire-util/src/main/java/joptsimple/util/DateConverter.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/util/KeyValuePair.java b/gemfire-util/src/main/java/joptsimple/util/KeyValuePair.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/util/KeyValuePair.java rename to gemfire-util/src/main/java/joptsimple/util/KeyValuePair.java diff --git a/gemfire-joptsimple/src/main/java/joptsimple/util/RegexMatcher.java b/gemfire-util/src/main/java/joptsimple/util/RegexMatcher.java similarity index 100% rename from gemfire-joptsimple/src/main/java/joptsimple/util/RegexMatcher.java rename to gemfire-util/src/main/java/joptsimple/util/RegexMatcher.java diff --git a/gemfire-json/src/main/java/org/json/CDL.java b/gemfire-util/src/main/java/org/json/CDL.java similarity index 100% rename from gemfire-json/src/main/java/org/json/CDL.java rename to gemfire-util/src/main/java/org/json/CDL.java diff --git a/gemfire-json/src/main/java/org/json/Cookie.java b/gemfire-util/src/main/java/org/json/Cookie.java similarity index 100% rename from gemfire-json/src/main/java/org/json/Cookie.java rename to gemfire-util/src/main/java/org/json/Cookie.java diff --git a/gemfire-json/src/main/java/org/json/CookieList.java b/gemfire-util/src/main/java/org/json/CookieList.java similarity index 100% rename from gemfire-json/src/main/java/org/json/CookieList.java rename to gemfire-util/src/main/java/org/json/CookieList.java diff --git a/gemfire-json/src/main/java/org/json/HTTP.java b/gemfire-util/src/main/java/org/json/HTTP.java similarity index 100% rename from gemfire-json/src/main/java/org/json/HTTP.java rename to gemfire-util/src/main/java/org/json/HTTP.java diff --git a/gemfire-json/src/main/java/org/json/HTTPTokener.java b/gemfire-util/src/main/java/org/json/HTTPTokener.java similarity index 100% rename from gemfire-json/src/main/java/org/json/HTTPTokener.java rename to gemfire-util/src/main/java/org/json/HTTPTokener.java diff --git a/gemfire-json/src/main/java/org/json/JSONArray.java b/gemfire-util/src/main/java/org/json/JSONArray.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONArray.java rename to gemfire-util/src/main/java/org/json/JSONArray.java diff --git a/gemfire-json/src/main/java/org/json/JSONException.java b/gemfire-util/src/main/java/org/json/JSONException.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONException.java rename to gemfire-util/src/main/java/org/json/JSONException.java diff --git a/gemfire-json/src/main/java/org/json/JSONML.java b/gemfire-util/src/main/java/org/json/JSONML.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONML.java rename to 
gemfire-util/src/main/java/org/json/JSONML.java diff --git a/gemfire-json/src/main/java/org/json/JSONObject.java b/gemfire-util/src/main/java/org/json/JSONObject.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONObject.java rename to gemfire-util/src/main/java/org/json/JSONObject.java diff --git a/gemfire-json/src/main/java/org/json/JSONString.java b/gemfire-util/src/main/java/org/json/JSONString.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONString.java rename to gemfire-util/src/main/java/org/json/JSONString.java diff --git a/gemfire-json/src/main/java/org/json/JSONStringer.java b/gemfire-util/src/main/java/org/json/JSONStringer.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONStringer.java rename to gemfire-util/src/main/java/org/json/JSONStringer.java diff --git a/gemfire-json/src/main/java/org/json/JSONTokener.java b/gemfire-util/src/main/java/org/json/JSONTokener.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONTokener.java rename to gemfire-util/src/main/java/org/json/JSONTokener.java diff --git a/gemfire-json/src/main/java/org/json/JSONWriter.java b/gemfire-util/src/main/java/org/json/JSONWriter.java similarity index 100% rename from gemfire-json/src/main/java/org/json/JSONWriter.java rename to gemfire-util/src/main/java/org/json/JSONWriter.java diff --git a/gemfire-json/src/main/java/org/json/XML.java b/gemfire-util/src/main/java/org/json/XML.java similarity index 100% rename from gemfire-json/src/main/java/org/json/XML.java rename to gemfire-util/src/main/java/org/json/XML.java diff --git a/gemfire-json/src/main/java/org/json/XMLTokener.java b/gemfire-util/src/main/java/org/json/XMLTokener.java similarity index 100% rename from gemfire-json/src/main/java/org/json/XMLTokener.java rename to gemfire-util/src/main/java/org/json/XMLTokener.java diff --git a/gemfire-web/build.gradle b/gemfire-web/build.gradle index c6f236123..c2d9bdfac 100644 --- a/gemfire-web/build.gradle +++ b/gemfire-web/build.gradle @@ -31,12 +31,12 @@ dependencies { runtime "org.springframework:spring-oxm:${springVersion}" runtime 'commons-fileupload:commons-fileupload:1.3.2' - provided "javax.servlet:javax.servlet-api:${servletAPIVersion}" + testCompile "javax.servlet:javax.servlet-api:${servletAPIVersion}" // have to use output since we exclude the dependent classes from jar :( - provided project(subprojectBase + 'gemfire-core').sourceSets.main.output - provided project(subprojectBase + 'gemfire-core') - provided project(subprojectBase + 'gemfire-junit') + testCompile project(subprojectBase + 'gemfire-core').sourceSets.main.output + testCompile project(subprojectBase + 'gemfire-core') + testCompile project(subprojectBase + 'gemfire-junit') } war { diff --git a/gemfirexd/client/build.gradle b/gemfirexd/client/build.gradle index a15c47b78..220873ac3 100644 --- a/gemfirexd/client/build.gradle +++ b/gemfirexd/client/build.gradle @@ -16,12 +16,12 @@ */ plugins { - id 'com.github.johnrengelman.shadow' version '1.2.3' + id 'com.github.johnrengelman.shadow' version '2.0.1' } dependencies { - provided "org.apache.thrift:libthrift:${thriftVersion}" - provided "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" + compileOnly "org.apache.thrift:libthrift:${thriftVersion}" + compileOnly "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" compile project(subprojectBase + 'snappydata-store-shared') } @@ -78,7 +78,7 @@ shadowJar { // avoid conflict with the older 
incompatible thrift versions relocate 'org.apache.thrift', 'io.snappydata.org.apache.thrift' - configurations = [ project.configurations.runtime, project.configurations.provided ] + configurations = [ project.configurations.runtime, project.configurations.compileOnly ] mergeServiceFiles() } diff --git a/gemfirexd/core/build.gradle b/gemfirexd/core/build.gradle index dcc51f544..aea8d535e 100644 --- a/gemfirexd/core/build.gradle +++ b/gemfirexd/core/build.gradle @@ -16,20 +16,20 @@ */ plugins { - id 'ca.coglinc.javacc' version '2.3.1' - id 'com.github.johnrengelman.shadow' version '1.2.3' + id 'ca.coglinc.javacc' version '2.4.0' + id 'com.github.johnrengelman.shadow' version '2.0.1' } dependencies { compile project(subprojectBase + 'gemfire-core') compile project(subprojectBase + 'snappydata-store-shared') compile project(subprojectBase + 'snappydata-store-client') - provided project(subprojectBase + 'snappydata-store-prebuild') + compileOnly project(subprojectBase + 'snappydata-store-prebuild') - provided files("${System.getProperty('java.home')}/../lib/tools.jar") - provided 'xalan:xalan:2.7.2' - provided 'xalan:serializer:2.7.2' - provided "com.pivotal:pxf-api:${pxfVersion}" + compileOnly files("${System.getProperty('java.home')}/../lib/tools.jar") + compileOnly 'xalan:xalan:2.7.2' + compileOnly 'xalan:serializer:2.7.2' + compileOnly "com.pivotal:pxf-api:${pxfVersion}" compile "org.osgi:org.osgi.core:${osgiVersion}" compile "org.apache.ant:ant:${antVersion}" compile "javax.servlet:javax.servlet-api:${servletAPIVersion}" @@ -37,18 +37,17 @@ dependencies { compile "com.esotericsoftware:kryo-shaded:${kryoVersion}" compile "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" + compileOnly "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compileOnly "com.google.protobuf:protobuf-java:${protobufVersion}" + compileOnly "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compileOnly "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "com.google.code.findbugs:jsr305:${jsr305Version}" } // move javacc output directory to a place where IDEA can easily register diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/GemFireXDVersion.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/GemFireXDVersion.java index 8bd02455e..f8462d39b 100644 --- 
a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/GemFireXDVersion.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/GemFireXDVersion.java @@ -43,7 +43,7 @@ public class GemFireXDVersion { static { GemFireCacheImpl.setGFXDSystem(true); isNativeLibLoaded = NativeCalls.getInstance().loadNativeLibrary() - ? SharedLibrary.register("gemfirexd") : false; + && SharedLibrary.register("gemfirexd"); GemFireVersion instance = GemFireVersion.getInstance( GemFireXDVersion.class, SharedUtils.GFXD_VERSION_PROPERTIES); if (isNativeLibLoaded) { diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/GfxdOpConflationHandler.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/GfxdOpConflationHandler.java index d40dc24e3..51676f956 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/GfxdOpConflationHandler.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/GfxdOpConflationHandler.java @@ -35,6 +35,7 @@ import com.pivotal.gemfirexd.internal.engine.ddl.ReplayableConflatable; import com.pivotal.gemfirexd.internal.engine.ddl.GfxdDDLQueueEntry; import com.pivotal.gemfirexd.internal.engine.ddl.GfxdDDLRegionQueue; +import com.pivotal.gemfirexd.internal.engine.ddl.catalog.messages.GfxdSystemProcedureMessage; import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils; import com.pivotal.gemfirexd.internal.iapi.services.sanity.SanityManager; @@ -113,7 +114,12 @@ public boolean doConflate(Conflatable confVal, Object confKey, boolean result = applyConflate(confVal, confKey, confValEntry, removeList, null, collection, removeFromIndex, skipExecuting); // the item being checked will also be conflated in this case - if (result && removeList != null) { + // remove DROP DDLs in every case since those imply a corresponding + // CREATE statement else its an "if exists" case where there was + // no existing entity in which case also it should be cleaned up + if (removeList != null && (result || + (confVal instanceof DDLConflatable) || + (confVal instanceof GfxdSystemProcedureMessage))) { removeList.add(confValEntry); } return result; diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/Misc.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/Misc.java index 4a3512a6d..7bf113ad0 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/Misc.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/Misc.java @@ -64,6 +64,7 @@ import com.gemstone.gemfire.internal.cache.TXManagerImpl; import com.gemstone.gemfire.internal.cache.execute.BucketMovedException; import com.gemstone.gemfire.internal.i18n.LocalizedStrings; +import com.gemstone.gemfire.internal.shared.SystemProperties; import com.gemstone.gemfire.internal.snappy.CallbackFactoryProvider; import com.gemstone.gemfire.internal.snappy.StoreCallbacks; import com.gemstone.gemfire.internal.util.DebuggerSupport; @@ -75,7 +76,6 @@ import com.pivotal.gemfirexd.internal.engine.sql.conn.GfxdHeapThresholdListener; import com.pivotal.gemfirexd.internal.engine.store.GemFireStore; import com.pivotal.gemfirexd.internal.iapi.error.DerbySQLException; -import com.pivotal.gemfirexd.internal.iapi.error.PublicAPI; import com.pivotal.gemfirexd.internal.iapi.error.StandardException; import com.pivotal.gemfirexd.internal.iapi.reference.SQLState; import com.pivotal.gemfirexd.internal.iapi.services.context.ContextService; @@ -1351,7 +1351,8 @@ public static 
StringBuilder histogramToString( return str; } - public static final String SNAPPY_HIVE_METASTORE = "SNAPPY_HIVE_METASTORE"; + public static final String SNAPPY_HIVE_METASTORE = + SystemProperties.SNAPPY_HIVE_METASTORE; public static boolean isSnappyHiveMetaTable(String schemaName) { return SNAPPY_HIVE_METASTORE.equalsIgnoreCase(schemaName); diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/ddl/GfxdDDLRegion.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/ddl/GfxdDDLRegion.java index 768a97b11..56691455e 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/ddl/GfxdDDLRegion.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/ddl/GfxdDDLRegion.java @@ -55,6 +55,7 @@ import com.gemstone.gemfire.internal.cache.LocalRegion; import com.gemstone.gemfire.internal.cache.Oplog; import com.gemstone.gemfire.internal.cache.RegionEntry; +import com.gemstone.gemfire.internal.cache.lru.Sizeable; import com.gemstone.gemfire.internal.cache.versions.RegionVersionVector; import com.gemstone.gemfire.internal.shared.Version; import com.gemstone.gnu.trove.TLongHashSet; @@ -623,7 +624,8 @@ static final void releaseLock(GfxdReadWriteLock rwLock, boolean exclusive) { * * @author swale */ - public static final class RegionValue extends GfxdDataSerializable { + public static final class RegionValue extends GfxdDataSerializable + implements Sizeable { private Object value; @@ -662,6 +664,11 @@ public void fromData(DataInput in) throws IOException, this.sequenceId = in.readLong(); } + @Override + public int getSizeInBytes() { + return Misc.getMemStoreBooting().getObjectSizer().sizeof(this.value) + 8; + } + @Override public String toString() { return this.value + " [sequence: " + this.sequenceId + "]"; diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireContainer.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireContainer.java index 0ff83fa06..f9a6bcee6 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireContainer.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireContainer.java @@ -529,9 +529,9 @@ public ExternalTableMetaData fetchHiveMetaData(boolean refresh) { public static String getRowBufferTableName(String columnBatchTableName) { String tableName = columnBatchTableName.replace( - StoreCallbacks.SHADOW_SCHEMA_NAME_WITH_SEPARATOR, ""); + SystemProperties.SHADOW_SCHEMA_NAME_WITH_SEPARATOR, ""); return tableName.substring(0, tableName.length() - - StoreCallbacks.SHADOW_TABLE_SUFFIX.length()); + SystemProperties.SHADOW_TABLE_SUFFIX.length()); } public boolean cachesGlobalIndex() { @@ -5100,7 +5100,7 @@ public final boolean isColumnStore() { // object store, but still added the check for possible future use // (e.g. 
local index table on column store) return isObjectStore() && - this.tableName.endsWith(StoreCallbacks.SHADOW_TABLE_SUFFIX); + this.tableName.endsWith(SystemProperties.SHADOW_TABLE_SUFFIX); } public final boolean isOffHeap() { diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireStore.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireStore.java index 31af34d8d..d4e760e7e 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireStore.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GemFireStore.java @@ -1636,6 +1636,14 @@ public String getBootProperty(String propName) { return this.serviceProperties.getProperty(propName); } + public void setBootProperty(String propName, String propValue) { + if (propValue != null) { + this.serviceProperties.setProperty(propName, propValue); + } else { + this.serviceProperties.remove(propName); + } + } + public Map getBootProperties() { return Collections.unmodifiableMap(this.serviceProperties); } diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GfxdObjectSizer.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GfxdObjectSizer.java index 06832ea83..ce7df8984 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GfxdObjectSizer.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/internal/engine/store/GfxdObjectSizer.java @@ -21,6 +21,7 @@ import com.gemstone.gemfire.cache.Declarable; import com.gemstone.gemfire.cache.util.ObjectSizer; +import com.gemstone.gemfire.internal.cache.CachedDeserializableFactory; import com.gemstone.gemfire.internal.cache.Token; import com.gemstone.gemfire.internal.size.ReflectionSingleObjectSizer; import com.pivotal.gemfirexd.internal.engine.jdbc.GemFireXDRuntimeException; @@ -83,13 +84,13 @@ else if (c == Long.class) { return (Long.SIZE / 8) + ReflectionSingleObjectSizer.OBJECT_SIZE; } else if (c == DataValueDescriptor[].class) { - DataValueDescriptor[] tmpdvdarr = (DataValueDescriptor[])o; + DataValueDescriptor[] dvdArr = (DataValueDescriptor[])o; int size = 0; - for (int i = 0; i < tmpdvdarr.length; i++) { - size += tmpdvdarr[i].getLengthInBytes(null); + for (DataValueDescriptor dvd : dvdArr) { + size += dvd.getLengthInBytes(null); size += ReflectionSingleObjectSizer.OBJECT_SIZE; } - size += tmpdvdarr.length * ReflectionSingleObjectSizer.REFERENCE_SIZE; + size += dvdArr.length * ReflectionSingleObjectSizer.REFERENCE_SIZE; size += ReflectionSingleObjectSizer.OBJECT_SIZE; return size; } @@ -98,11 +99,8 @@ else if (c == byte[].class) { } else if (Token.isInvalidOrRemoved(o)) { return 0; - } - else { - throw GemFireXDRuntimeException.newRuntimeException( - "unknown data type passed to GfxdObjectSizer: " + o.getClass(), - null); + } else { + return CachedDeserializableFactory.calcMemSize(o); } } catch (StandardException e) { throw GemFireXDRuntimeException.newRuntimeException( diff --git a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/tools/sizer/ObjectSizer.java b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/tools/sizer/ObjectSizer.java index c6170660f..f93a3d571 100644 --- a/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/tools/sizer/ObjectSizer.java +++ b/gemfirexd/core/src/main/java/com/pivotal/gemfirexd/tools/sizer/ObjectSizer.java @@ -66,8 +66,8 @@ import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl.Chunk; import 
com.gemstone.gemfire.internal.offheap.annotations.Released; import com.gemstone.gemfire.internal.offheap.annotations.Retained; +import com.gemstone.gemfire.internal.shared.SystemProperties; import com.gemstone.gemfire.internal.size.ReflectionSingleObjectSizer; -import com.gemstone.gemfire.internal.snappy.StoreCallbacks; import com.pivotal.gemfirexd.Constants; import com.pivotal.gemfirexd.Constants.QueryHints.SizerHints; import com.pivotal.gemfirexd.internal.engine.Misc; @@ -996,8 +996,8 @@ else if (GatewaySenderEventImpl.class.isAssignableFrom(valClass)) { } private static final Pattern columnTableRegex = - Pattern.compile(".*" + StoreCallbacks.SHADOW_SCHEMA_NAME + "(.*)" + - StoreCallbacks.SHADOW_TABLE_SUFFIX); + Pattern.compile(".*" + SystemProperties.SHADOW_SCHEMA_NAME + "(.*)" + + SystemProperties.SHADOW_TABLE_SUFFIX); private Boolean isColumnTable(String fullyQualifiedTable) { return columnTableRegex.matcher(fullyQualifiedTable).matches(); } diff --git a/gemfirexd/prebuild/build.gradle b/gemfirexd/prebuild/build.gradle index 4138fcb98..8bcfd1c5a 100644 --- a/gemfirexd/prebuild/build.gradle +++ b/gemfirexd/prebuild/build.gradle @@ -18,7 +18,7 @@ dependencies { compile project(subprojectBase + 'snappydata-store-shared') - provided files("${System.getProperty('java.home')}/../lib/tools.jar") - provided "org.apache.ant:ant:${antVersion}" + compileOnly files("${System.getProperty('java.home')}/../lib/tools.jar") + compileOnly "org.apache.ant:ant:${antVersion}" } jar.baseName = 'snappydata-store-prebuild' diff --git a/gemfirexd/shared/build.gradle b/gemfirexd/shared/build.gradle index 497df527a..d4adeb971 100644 --- a/gemfirexd/shared/build.gradle +++ b/gemfirexd/shared/build.gradle @@ -16,8 +16,8 @@ */ dependencies { - provided "org.apache.thrift:libthrift:${thriftVersion}" - provided "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" + compileOnly "org.apache.thrift:libthrift:${thriftVersion}" + compileOnly "org.apache.spark:spark-unsafe_${scalaBinaryVersion}:${sparkVersion}" compile project(subprojectBase + 'gemfire-shared') } diff --git a/gemfirexd/tools/build.gradle b/gemfirexd/tools/build.gradle index 11b912aad..7b6d04ade 100644 --- a/gemfirexd/tools/build.gradle +++ b/gemfirexd/tools/build.gradle @@ -16,8 +16,8 @@ */ plugins { - id 'ca.coglinc.javacc' version '2.3.1' - id 'com.github.johnrengelman.shadow' version '1.2.3' + id 'ca.coglinc.javacc' version '2.4.0' + id 'com.github.johnrengelman.shadow' version '2.0.1' } artifacts { @@ -25,11 +25,11 @@ artifacts { } dependencies { - provided project(subprojectBase + 'snappydata-store-core') + compileOnly project(subprojectBase + 'snappydata-store-core') compile project(subprojectBase + 'snappydata-store-hibernate') // ddlutils dependencies - compile 'commons-beanutils:commons-beanutils:1.9.2' + compile 'commons-beanutils:commons-beanutils:1.9.3' compile 'commons-codec:commons-codec:1.10' compile 'commons-collections:commons-collections:3.2.2' compile 'commons-configuration:commons-configuration:1.10' @@ -52,35 +52,43 @@ dependencies { testCompile project(subprojectBase + 'snappydata-store-client') testCompile project(subprojectBase + 'gemfire-junit') - provided files("${System.getProperty('java.home')}/../lib/tools.jar") - provided 'xalan:xalan:2.7.2' - provided 'xalan:serializer:2.7.2' - provided "com.pivotal:pxf-api:${pxfVersion}" - - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided 
"org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "org.eclipse.jetty:jetty-http:${jettyVersion}" - provided "org.eclipse.jetty:jetty-io:${jettyVersion}" - provided "org.eclipse.jetty:jetty-security:${jettyVersion}" - provided "org.eclipse.jetty:jetty-server:${jettyVersion}" - provided "org.eclipse.jetty:jetty-servlet:${jettyVersion}" - provided "org.eclipse.jetty:jetty-util:${jettyVersion}" - provided "org.eclipse.jetty:jetty-webapp:${jettyVersion}" - provided "org.eclipse.jetty:jetty-xml:${jettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" - + compileOnly files("${System.getProperty('java.home')}/../lib/tools.jar") + compileOnly 'xalan:xalan:2.7.2' + compileOnly 'xalan:serializer:2.7.2' + compileOnly "com.pivotal:pxf-api:${pxfVersion}" + + compileOnly "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compileOnly "com.google.protobuf:protobuf-java:${protobufVersion}" + compileOnly "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compileOnly "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compileOnly "org.eclipse.jetty:jetty-http:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-io:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-security:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-servlet:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-webapp:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-xml:${jettyVersion}" + compileOnly "com.google.code.findbugs:jsr305:${jsr305Version}" + + testCompile "org.apache.hbase:hbase-common:${hbaseVersion}" + testCompile "org.apache.hbase:hbase-protocol:${hbaseVersion}" + testCompile "org.apache.hbase:hbase-client:${hbaseVersion}" + testCompile "org.apache.hbase:hbase-server:${hbaseVersion}" + testCompile "com.sun.jersey:jersey-core:${sunJerseyVersion}" + testCompile "com.sun.jersey:jersey-server:${sunJerseyVersion}" + testCompile "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" + testCompile "org.mortbay.jetty:jetty:${hadoopJettyVersion}" + testCompile "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" + testCompile 'org.cloudera.htrace:htrace-core:2.05' testCompile "org.apache.hadoop:hadoop-common:${hadoopVersion}:tests" testCompile "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}:tests" + + testCompile "com.pivotal:pxf-api:${pxfVersion}" testCompile "org.apache.ant:ant-launcher:${antVersion}" testCompile "org.apache.derby:derby:${derbyVersion}" testCompile "org.apache.derby:derbynet:${derbyVersion}" @@ -109,8 +117,7 @@ task deleteGenerated(type: Delete) { clean.dependsOn 
deleteGenerated sourceSets.main.java.srcDirs = [ 'src/main/java', javaccOut, 'src/ddlutils/java' ] -sourceSets.test.java.srcDirs = [ 'src/test/java', 'src/testing/java', 'src/dunit/java', - 'src/demo/java', 'src/test-ddlutils/java' ] +sourceSets.test.java.srcDirs = [ 'src/test/java', 'src/testing/java', 'src/dunit/java', 'src/demo/java' ] task generatePropertiesFiles(dependsOn: 'processResources') { def infoDir = file("${buildDir}/resources/main/com/pivotal/gemfirexd/internal/info") diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServer2DUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServer2DUnit.java new file mode 100644 index 000000000..e12d692bd --- /dev/null +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServer2DUnit.java @@ -0,0 +1,798 @@ +/* + * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.pivotal.gemfirexd; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Locale; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; + +import com.gemstone.gemfire.cache.CacheClosedException; +import com.gemstone.gemfire.cache.CacheException; +import com.gemstone.gemfire.cache.CacheFactory; +import com.gemstone.gemfire.cache.DataPolicy; +import com.gemstone.gemfire.cache.Scope; +import com.gemstone.gemfire.cache.execute.Function; +import com.gemstone.gemfire.cache.execute.FunctionContext; +import com.gemstone.gemfire.cache.execute.FunctionException; +import com.gemstone.gemfire.cache.execute.FunctionService; +import com.gemstone.gemfire.cache.execute.ResultCollector; +import com.gemstone.gemfire.distributed.DistributedMember; +import com.gemstone.gemfire.distributed.DistributedSystem; +import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; +import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember; +import com.gemstone.gemfire.internal.AvailablePort; +import com.gemstone.gemfire.internal.SocketCreator; +import com.gemstone.gemfire.internal.cache.PartitionedRegion; +import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation; +import com.pivotal.gemfirexd.internal.engine.GfxdConstants; +import com.pivotal.gemfirexd.internal.engine.Misc; +import com.pivotal.gemfirexd.internal.engine.distributed.GfxdListResultCollector; +import com.pivotal.gemfirexd.internal.engine.store.ServerGroupUtils; +import com.pivotal.gemfirexd.internal.iapi.util.StringUtil; +import io.snappydata.test.dunit.RMIException; +import io.snappydata.test.dunit.SerializableCallable; +import io.snappydata.test.dunit.SerializableRunnable; +import io.snappydata.test.dunit.VM; +import org.apache.derbyTesting.junit.JDBC; + +public class ClientServer2DUnit extends ClientServerTestBase { + + public 
ClientServer2DUnit(String name) { + super(name); + } + + public void testPersistentDD() throws Exception { + // Start one server + AsyncVM async1 = invokeStartServerVM(1, 0, null, null); + // Start a second server with DD persistence + Properties props = new Properties(); + props.setProperty(com.pivotal.gemfirexd.Attribute.SYS_PERSISTENT_DIR, "SYS"); + AsyncVM async2 = invokeStartServerVM(2, 0, null, props); + + // Start a client + startClientVMs(1, 0, null); + + // wait for servers to start + joinVMs(true, async1, async2); + + // Create a table + clientSQLExecute(1, "create table TESTTABLE (ID int not null, " + + "DESCRIPTION varchar(1024) not null)"); + + // Also try creating the same table with a different user name + String userName = "TesT1"; + // just in the very remote case this clashes with random user name + if (userName.equalsIgnoreCase(TestUtil.currentUserName)) { + userName = "TestT2"; + } + final String userSchemaName = StringUtil.SQLToUpperCase(userName); + executeForUser(userName, "create table TESTTABLE (ID int not null, " + + "DESCRIPTION varchar(1024) not null)"); + + final String schemaName = getCurrentDefaultSchemaName(); + // Check the region properties + RegionAttributesCreation[] expectedAttrs = checkTestTableProperties( + schemaName); + RegionAttributesCreation[] userExpectedAttrs = checkTestTableProperties( + userSchemaName); + + // Restart everything and recheck + stopVMNums(-1, -2, 1); + // verify that nothing is running + checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), + this.serverVMs.get(1)); + + async1 = restartServerVMAsync(1, 0, null, null); + async2 = restartServerVMAsync(2, 0, null, props); + joinVMs(false, async1, async2); + restartVMNums(1); + + // Check the region properties on server with DD persistence + serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); + serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + userExpectedAttrs[0]); + // Check that region exists on other server and client due to GII from + // persisted server + serverVerifyRegionProperties(2, schemaName, "TESTTABLE", expectedAttrs[0]); + serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", + userExpectedAttrs[0]); + clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); + clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + userExpectedAttrs[1]); + + // Also check that stale DD persisted data is overridden. 
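// (In the steps that follow: server 1, which persists the data dictionary,
// and the client are stopped; TESTTABLE is dropped and recreated as a
// replicated table on server 2; after the restart, server 1's older persisted
// dictionary should be superseded and all members are expected to report the
// new REPLICATE region attributes.)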
+ stopVMNums(-1, 1); + + serverSQLExecute(2, "drop table TESTTABLE"); + executeOnServerForUser(2, userName, "drop table testtable"); + serverSQLExecute(2, "create table TESTTABLE (ID int not null, " + + "DESCRIPTION varchar(1024) not null) replicate"); + executeOnServerForUser(2, userName, "create table TESTTABLE (ID int " + + "not null, DESCRIPTION varchar(1024) not null) replicate"); + + restartVMNums(1, -1); + + expectedAttrs[0] = new RegionAttributesCreation(); + expectedAttrs[0].setScope(Scope.DISTRIBUTED_ACK); + expectedAttrs[0].setDataPolicy(DataPolicy.REPLICATE); + expectedAttrs[0].setInitialCapacity(GfxdConstants.DEFAULT_INITIAL_CAPACITY); + expectedAttrs[0].setConcurrencyChecksEnabled(false); + expectedAttrs[0].setAllHasFields(true); + expectedAttrs[0].setHasDiskDirs(false); + expectedAttrs[0].setHasDiskWriteAttributes(false); + serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); + serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + expectedAttrs[0]); + serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", + expectedAttrs[0]); + serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); + + expectedAttrs[1] = new RegionAttributesCreation(expectedAttrs[0], false); + expectedAttrs[1].setDataPolicy(DataPolicy.EMPTY); + expectedAttrs[1].setConcurrencyChecksEnabled(false); + expectedAttrs[1].setHasDiskDirs(false); + expectedAttrs[1].setHasDiskWriteAttributes(false); + clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); + clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + expectedAttrs[1]); + + // Stop everything and check that new table properties are being persisted. + stopVMNums(-1, 1); + stopVMNums(-2); + // start persisted DD VM first using other VM's DD + joinVM(false, restartServerVMAsync(1, 0, null, props)); + props = new Properties(); + props.setProperty(com.pivotal.gemfirexd.Attribute.GFXD_PERSIST_DD, "true"); + async2 = restartServerVMAsync(2, 0, null, props); + joinVM(false, async2); + restartVMNums(1); + + // Check the region properties on server with DD persistence + serverVerifyRegionProperties(2, schemaName, "TESTTABLE", expectedAttrs[0]); + serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", + expectedAttrs[0]); + // Check that region exists on other server and client due to GII from + // persisted server + serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); + serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + expectedAttrs[0]); + clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); + clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", + expectedAttrs[1]); + + // Drop the table + clientSQLExecute(1, "drop table TESTTABLE"); + executeForUser(userName, "drop table testTable"); + } + + public void testInitialScripts() throws Exception { + String testsDir = TestUtil.getResourcesDir(); + + TestUtil.deletePersistentFiles = true; + // Start one server + AsyncVM async1 = invokeStartServerVM(1, 0, null, null); + // Start a second server with initial script + Properties props = new Properties(); + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript.sql"); + AsyncVM async2 = invokeStartServerVM(2, 0, null, props); + + // wait for servers to start + joinVMs(true, async1, async2); + + // Start a client + startVMs(1, 0); + + // check that regions have been created but with no data + String ckFile = testsDir + "/lib/checkQuery.xml"; + + sqlExecuteVerify(new 
int[]{1}, new int[]{1, 2}, + "select cid, addr, tid from trade.customers", ckFile, "empty"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, qty, tid from trade.portfolio", ckFile, "empty"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " + + "trade.customers tc where tp.cid=tc.cid", ckFile, "empty"); + + // drop the tables before restart + clientSQLExecute(1, "drop table trade.portfolio"); + clientSQLExecute(1, "drop table trade.customers"); + + // Restart with both the initial SQL scripts + stopVMNums(1, -1, -2); + // verify that nothing is running + checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs + .get(1)); + + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript.sql," + testsDir + + "/lib/checkInitialScript2.sql"); + + async2 = restartServerVMAsync(2, 0, null, props); + async1 = restartServerVMAsync(1, 0, null, null); + restartVMNums(1); + joinVMs(false, async1, async2); + + // check that data has been correctly populated + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " + + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); + + // Restart and check failure with SQL scripts in incorrect order + stopVMNums(-2); + + // drop the tables before restart + clientSQLExecute(1, "drop table trade.portfolio"); + clientSQLExecute(1, "drop table trade.customers"); + + stopVMNums(1, -1); + + // verify that nothing is running + checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs + .get(1)); + + restartVMNums(-1, 1); + + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript2.sql," + testsDir + + "/lib/checkInitialScript.sql"); + try { + joinVM(false, restartServerVMAsync(2, 0, null, props)); + fail("Expected an SQLException while starting the VM."); + } catch (RMIException ex) { + if (ex.getCause() instanceof SQLException) { + SQLException sqlEx = (SQLException)ex.getCause(); + if (!"XJ040".equals(sqlEx.getSQLState())) { + throw ex; + } else { + // Explicitly delete the newly timestamped persistent file. 
+ this.serverVMs.get(1).invoke(DistributedSQLTestBase.class, + "deleteDataDictionaryDir"); + } + } else { + throw ex; + } + } + // verify that failed server is not running + checkVMsDown(this.serverVMs.get(1)); + + // Restart everything and check that init script fails + // with already existing table + stopVMNums(-1, 1); + // verify that nothing is running + checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs + .get(1)); + + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript.sql"); + async2 = restartServerVMAsync(2, 0, null, props); + restartVMNums(1); + joinVM(false, async2); + + addExpectedException(new int[]{1}, new int[]{2}, SQLException.class); + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript.sql," + testsDir + + "/lib/checkInitialScript2.sql"); + try { + joinVM(false, restartServerVMAsync(1, 0, null, props)); + fail("Expected an SQLException while starting the VM."); + } catch (RMIException ex) { + if (ex.getCause() instanceof SQLException) { + SQLException sqlEx = (SQLException)ex.getCause(); + if (!"XJ040".equals(sqlEx.getSQLState())) { + throw ex; + } else { + // Explicitly delete the newly timestamped persistent file. + this.serverVMs.get(0).invoke(DistributedSQLTestBase.class, + "deleteDataDictionaryDir"); + } + } else { + throw ex; + } + } + // verify that failed server is not running + checkVMsDown(this.serverVMs.get(0)); + removeExpectedException(new int[]{1}, new int[]{2}, + SQLException.class); + + // Restart everything and check that init script succeeds + // with already existing table when loading only data + stopVMNums(1, -2); + // verify that nothing is running + checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs + .get(1)); + + restartVMNums(1, -2); + + addExpectedException(new int[]{1}, new int[]{2}, SQLException.class); + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript2.sql"); + joinVM(false, restartServerVMAsync(1, 0, null, props)); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " + + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); + + // Drop tables, start and recheck to see everything is in order. + serverSQLExecute(2, "drop table trade.portfolio"); + serverSQLExecute(2, "drop table trade.customers"); + stopVMNums(-1); + props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir + + "/lib/checkInitialScript.sql," + testsDir + + "/lib/checkInitialScript2.sql"); + joinVM(false, restartServerVMAsync(1, 0, null, props)); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); + sqlExecuteVerify(new int[]{1}, new int[]{1, 2}, + "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " + + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); + } + + /** + * Bug#46682 test: do not allow a node to join the cluster if its locale is + * different from the cluster. 
+ * The derby 'territory' property allows us to change the locale of the + * database. + */ + public void testStartUpWithLocaleSet() throws Exception { + stopAllVMs(); + Properties props = new Properties(); + // start some servers + startVMs(0, 1); + startVMs(0, 1); + startVMs(0, 1); + // now change the 'territory' (and hence the database locale) for the next + // VM + String locale1Str = new Locale("fr", "CA").toString(); + // choose a different locale than the one already set + if (!Locale.getDefault().toString().equals(locale1Str)) { + props.setProperty("territory", locale1Str); + } else { + props.setProperty("territory", new Locale("en", "GB").toString()); + } + try { + startVMs(0, 1, 0, null, props); + fail("This test should have failed with GemFireXDRuntimeException." + + "Locale of all nodes in the cluster needs to be the same"); + } catch (RMIException e) { + if (!e.getCause().getCause().getMessage() + .startsWith("Locale should be same on all nodes in the cluster")) { + fail("Test failed with unexpected exception:", e); + } + } finally { + stopAllVMs(); + } + } + + /** + * Test for checking routing using server groups + * {@link ServerGroupUtils#onServerGroups}. + */ + public void testServerGroupsRouting() throws Exception { + // start some servers in different server groups and a client + AsyncVM async1 = invokeStartServerVM(1, 0, "SG1", null); + AsyncVM async2 = invokeStartServerVM(2, 0, "SG2", null); + AsyncVM async3 = invokeStartServerVM(3, 0, "SG1, SG3", null); + startClientVMs(1, 0, "SG2, SG4", null); + // wait for servers to start + joinVMs(true, async1, async2, async3); + + // register the function on all the VMs + SerializableRunnable registerFn = new SerializableRunnable( + "register function") { + @Override + public void run() throws CacheException { + FunctionService.registerFunction(new TestFunction()); + } + }; + serverExecute(1, registerFn); + serverExecute(2, registerFn); + serverExecute(3, registerFn); + clientExecute(1, registerFn); + + DistributedMember server1 = getMemberForVM(this.serverVMs.get(0)); + DistributedMember server2 = getMemberForVM(this.serverVMs.get(1)); + DistributedMember server3 = getMemberForVM(this.serverVMs.get(2)); + DistributedMember client1 = getMemberForVM(this.clientVMs.get(0)); + + List resultMembers = executeOnServerGroups("SG3"); + checkMembersEqual(resultMembers, server3); + + resultMembers = executeOnServerGroups("SG2"); + checkMembersEqual(resultMembers, server2, client1); + + resultMembers = executeOnServerGroups("SG4"); + checkMembersEqual(resultMembers, client1); + + resultMembers = executeOnServerGroups("SG2,SG1"); + checkMembersEqual(resultMembers, server1, server2, server3, client1); + + resultMembers = executeOnServerGroups("SG3, SG2"); + checkMembersEqual(resultMembers, server2, server3, client1); + + resultMembers = executeOnServerGroups("SG3, SG4"); + checkMembersEqual(resultMembers, server3, client1); + + // check for execution on all servers with no server groups + resultMembers = executeOnServerGroups(""); + checkMembersEqual(resultMembers, server1, server2, server3); + resultMembers = executeOnServerGroups(null); + checkMembersEqual(resultMembers, server1, server2, server3); + resultMembers = executeOnServerGroups(" "); + checkMembersEqual(resultMembers, server1, server2, server3); + + // check for exception in case of no members with given server groups + try { + executeOnServerGroups("SG5"); + fail("expected function exception"); + } catch (FunctionException ex) { + // ignore expected exception + } + + // check for 
null results too + GfxdListResultCollector gfxdRC = new GfxdListResultCollector(); + ResultCollector rc = ServerGroupUtils + .onServerGroups("SG1, SG2, SG3, SG4", false).withCollector(gfxdRC) + .execute(TestFunction.ID); + List result = (List)rc.getResult(); + assertEquals("expected number of results: 4", 4, result.size()); + for (Object res : result) { + assertNull("expected null result", res); + } + } + + /** + * Test for checking execution on server groups and nodes using GROUPS() and + * DSID() builtins. + */ + public void testNodeAndServerGroupsExecution() throws Exception { + // start some servers in different server groups and a client + AsyncVM async1 = invokeStartServerVM(1, 0, "SG1", null); + AsyncVM async2 = invokeStartServerVM(2, 0, "SG2", null); + AsyncVM async3 = invokeStartServerVM(3, 0, "SG1, SG3", null); + startClientVMs(1, 0, "SG2, SG4", null); + // wait for servers to start + joinVMs(true, async1, async2, async3); + + // create a table and insert some rows + clientSQLExecute(1, "create table EMP.TESTTABLE (id int primary key, " + + "addr varchar(100))"); + final PreparedStatement pstmt = TestUtil.jdbcConn + .prepareStatement("insert into EMP.TESTTABLE values (?, ?)"); + for (int index = 1; index <= 100; ++index) { + pstmt.setInt(1, index); + pstmt.setString(2, "ADDR" + index); + pstmt.execute(); + } + + // now try execute of a query using GROUPS() + int numResults = 0; + final Statement stmt = TestUtil.jdbcConn.createStatement(); + ResultSet rs = stmt.executeQuery("select * from EMP.TESTTABLE " + + "where GROUPS() like 'SG1%'"); + while (rs.next()) { + ++numResults; + } + rs = stmt.executeQuery("select * from EMP.TESTTABLE " + + "where GROUPS() = 'SG2'"); + while (rs.next()) { + ++numResults; + } + assertEquals(100, numResults); + + numResults = 0; + rs = stmt.executeQuery("select * from EMP.TESTTABLE where GROUPS() like " + + "'%SG3' or 'SG2' = GROUPS() or GROUPS() = 'SG1'"); + while (rs.next()) { + ++numResults; + } + assertEquals(100, numResults); + + // check zero results for server groups with no servers + rs = stmt.executeQuery("select * from EMP.TESTTABLE " + + "where GROUPS() like '%SG4'"); + assertFalse(rs.next()); + rs = stmt.executeQuery("select * from EMP.TESTTABLE " + + "where GROUPS() = 'SG3'"); + assertFalse(rs.next()); + + // get the member IDs + DistributedMember server1 = getMemberForVM(this.serverVMs.get(0)); + DistributedMember server2 = getMemberForVM(this.serverVMs.get(1)); + DistributedMember server3 = getMemberForVM(this.serverVMs.get(2)); + DistributedMember client1 = getMemberForVM(this.clientVMs.get(0)); + + // query execution using DSID() builtin + numResults = 0; + rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '" + + server1.toString() + "' or DSID() = '" + server2.toString() + + "' or DSID() = '" + server3.toString() + "'"); + while (rs.next()) { + ++numResults; + } + assertEquals(100, numResults); + + // check zero results on client + rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '" + + client1.toString() + "'"); + assertFalse(rs.next()); + rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '1'"); + assertFalse(rs.next()); + } + + /** + * Test if client is successfully able to fail over to secondary locators for + * control connection specified with "secondary-locators" property if the + * primary one is unavailable at the time of the first connection (#47486).
+ */ + public void testNetworkClientFailoverMultipleLocators() throws Throwable { + // start some locators and couple of servers + int locPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + int locPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + int locPort3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); + + final String address = SocketCreator.getLocalHost().getHostAddress(); + // try with both host:port and host[port] for client connection + String locators = address + '[' + locPort1 + "]," + address + '[' + + locPort2 + "]," + address + '[' + locPort3 + ']'; + + Properties props = new Properties(); + props.setProperty("locators", locators); + props.setProperty("mcast-port", "0"); + startLocatorVM(address, locPort1, null, props); + startLocatorVM(address, locPort2, null, props); + startLocatorVM(address, locPort3, null, props); + + // now a server and a peer client + startVMs(1, 1, 0, null, props); + + // start network servers on all members + final int netPort1 = startNetworkServer(1, null, null); + final int netPort2 = startNetworkServer(2, null, null); + final int netPort3 = startNetworkServer(3, null, null); + startNetworkServer(4, null, null); + + // now client connections using all locators, second and third locators and + // only third locator + final String secondaryLocators = address + ':' + netPort2 + ',' + address + + ':' + netPort3; + final String secondaryLocators2 = address + '[' + netPort2 + "]," + address + + '[' + netPort3 + ']'; + + Connection netConn1 = TestUtil.getNetConnection(address, netPort1, + ";secondary-locators=" + secondaryLocators, null); + Statement stmt = netConn1.createStatement(); + stmt.execute("create table T.TESTTABLE (ID int primary key, " + + "DESCRIPTION varchar(1024) not null) redundancy 1"); + + final AtomicInteger id = new AtomicInteger(1); + SerializableCallable testConn = new SerializableCallable() { + @Override + public Object call() { + /* + System.setProperty("gemfirexd.debug.true", "TraceClientHA"); + SanityManager.TraceClientHA = true; + */ + try { + final Connection netConn; + final Properties props; + final int currId = id.get(); + switch (currId % 4) { + case 1: + netConn = TestUtil.getNetConnection(address, netPort1, + ";secondary-locators=" + secondaryLocators2, null); + break; + case 2: + props = new Properties(); + props.setProperty("secondary-locators", secondaryLocators); + netConn = TestUtil.getNetConnection(address, netPort1, null, + props); + break; + case 3: + props = new Properties(); + props.setProperty("secondary-locators", secondaryLocators2); + netConn = TestUtil.getNetConnection(address, netPort1, null, + props); + break; + default: + netConn = TestUtil.getNetConnection(address, netPort1, + ";secondary-locators=" + secondaryLocators, null); + break; + } + Statement stmt = netConn.createStatement(); + assertEquals(1, stmt.executeUpdate("insert into T.TESTTABLE values (" + + currId + ",'DESC" + currId + "')")); + return id.incrementAndGet(); + } catch (Throwable t) { + return t; + } + } + }; + + // stop the locators and test for net connections from all JVMs + checkAndSetId(id, testConn.call()); + stopVMNum(-1); + checkAndSetId(id, testConn.call()); + + checkAndSetId(id, serverExecute(1, testConn)); + stopVMNum(-2); + checkAndSetId(id, serverExecute(1, testConn)); + + checkAndSetId(id, serverExecute(2, testConn)); + stopVMNum(-3); + checkAndSetId(id, serverExecute(2, testConn)); + + checkAndSetId(id, testConn.call()); + checkAndSetId(id, serverExecute(1, testConn)); + 
checkAndSetId(id, serverExecute(2, testConn)); + + // executing from new server must fail since no locator is up at this point + try { + checkAndSetId(id, serverExecute(4, testConn)); + fail("expected connection failure with no locator available"); + } catch (SQLException sqle) { + if (!"08006".equals(sqle.getSQLState()) + && !"X0Z01".equals(sqle.getSQLState()) + && !"40XD0".equals(sqle.getSQLState())) { + fail("unexpected exception", sqle); + } + } + + // finally verify the results + SerializableCallable verify = new SerializableCallable() { + @Override + public Object call() { + try { + final Connection netConn = TestUtil.getNetConnection(address, + netPort1, ";secondary-locators=" + secondaryLocators, null); + Statement stmt = netConn.createStatement(); + ResultSet rs = stmt.executeQuery("select * from T.TESTTABLE"); + final Object[][] expectedResults = new Object[][]{ + new Object[]{1, "DESC1"}, new Object[]{2, "DESC2"}, + new Object[]{3, "DESC3"}, new Object[]{4, "DESC4"}, + new Object[]{5, "DESC5"}, new Object[]{6, "DESC6"}, + new Object[]{7, "DESC7"}, new Object[]{8, "DESC8"}, + new Object[]{9, "DESC9"},}; + JDBC.assertUnorderedResultSet(rs, expectedResults, false); + return id.get(); + } catch (Throwable t) { + return t; + } + } + }; + checkAndSetId(id, verify.call()); + checkAndSetId(id, serverExecute(1, verify)); + checkAndSetId(id, serverExecute(2, verify)); + } + + private void checkVMsDown(VM... vms) { + SerializableRunnable noGFE = new SerializableRunnable("GFE down") { + @Override + public void run() throws CacheException { + try { + CacheFactory.getAnyInstance(); + fail("expected the cache to be closed"); + } catch (CacheClosedException ex) { + // expected + } + DistributedSystem sys = InternalDistributedSystem + .getConnectedInstance(); + assertNull("expected the distributed system to be down", sys); + } + }; + for (VM vm : vms) { + if (vm == null) { + noGFE.run(); + } else { + vm.invoke(noGFE); + } + } + } + + private void checkMembersEqual(List resultMembers, + DistributedMember... expectedMembers) { + assertEquals("expected number of results: " + expectedMembers.length, + expectedMembers.length, resultMembers.size()); + for (DistributedMember member : expectedMembers) { + assertTrue("expected to find VM in result: " + member, + resultMembers.contains(member)); + } + } + + private List executeOnServerGroups(String serverGroups) { + GfxdListResultCollector gfxdRC = new GfxdListResultCollector(); + ResultCollector rc = ServerGroupUtils + .onServerGroups(serverGroups, false).withArgs(Boolean.TRUE) + .withCollector(gfxdRC).execute(ClientServer2DUnit.TestFunction.ID); + return (List)rc.getResult(); + } + + private static void executeForUser(String userName, String sql) + throws SQLException { + final Properties userProps = new Properties(); + userProps.setProperty(PartitionedRegion.rand.nextBoolean() + ? 
com.pivotal.gemfirexd.Attribute.USERNAME_ATTR + : com.pivotal.gemfirexd.Attribute.USERNAME_ALT_ATTR, userName); + userProps.setProperty(com.pivotal.gemfirexd.Attribute.PASSWORD_ATTR, userName); + final Connection userConn = TestUtil.getConnection(userProps); + userConn.createStatement().execute(sql); + } + + @SuppressWarnings("SameParameterValue") + private void executeOnServerForUser(int serverNum, final String userName, + final String sql) throws Exception { + serverExecute(serverNum, new SerializableRunnable("executing " + sql + + " with userName " + userName) { + @Override + public void run() throws CacheException { + try { + executeForUser(userName, sql); + } catch (SQLException ex) { + throw new CacheException(ex) { + }; + } + } + }); + } + + private static final class TestFunction implements Function { + + private static final String ID = "ClientServerDUnit.TestFunction"; + + public void execute(FunctionContext context) { + Object args = context.getArguments(); + if (args instanceof Boolean && (Boolean)args) { + InternalDistributedMember myId = Misc.getGemFireCache().getMyId(); + context.getResultSender().lastResult(myId); + } else { + context.getResultSender().lastResult(null); + } + } + + public String getId() { + return ID; + } + + public boolean hasResult() { + return true; + } + + public boolean optimizeForWrite() { + return false; + } + + public boolean isHA() { + return true; + } + } +} diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerDUnit.java index bb3c7858f..dc8f32a90 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerDUnit.java @@ -23,24 +23,20 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.List; -import java.util.Locale; import java.util.Properties; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import com.gemstone.gemfire.cache.*; -import com.gemstone.gemfire.cache.execute.Function; -import com.gemstone.gemfire.cache.execute.FunctionContext; -import com.gemstone.gemfire.cache.execute.FunctionException; -import com.gemstone.gemfire.cache.execute.FunctionService; -import com.gemstone.gemfire.cache.execute.ResultCollector; -import com.gemstone.gemfire.distributed.DistributedMember; +import com.gemstone.gemfire.cache.CacheException; +import com.gemstone.gemfire.cache.DataPolicy; +import com.gemstone.gemfire.cache.PartitionAttributes; +import com.gemstone.gemfire.cache.PartitionAttributesFactory; +import com.gemstone.gemfire.cache.PartitionedRegionStorageException; +import com.gemstone.gemfire.cache.Scope; import com.gemstone.gemfire.distributed.DistributedSystem; import com.gemstone.gemfire.distributed.internal.DistributionConfig; import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; -import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember; import com.gemstone.gemfire.internal.AvailablePort; import com.gemstone.gemfire.internal.SocketCreator; import com.gemstone.gemfire.internal.cache.PartitionedRegion; @@ -53,9 +49,7 @@ import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverAdapter; import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverHolder; import com.pivotal.gemfirexd.internal.engine.GfxdConstants; -import 
com.pivotal.gemfirexd.internal.engine.Misc; import com.pivotal.gemfirexd.internal.engine.ddl.resolver.GfxdPartitionByExpressionResolver; -import com.pivotal.gemfirexd.internal.engine.distributed.GfxdListResultCollector; import com.pivotal.gemfirexd.internal.engine.jdbc.GemFireXDRuntimeException; import com.pivotal.gemfirexd.internal.engine.store.ServerGroupUtils; import com.pivotal.gemfirexd.internal.iapi.error.PublicAPI; @@ -64,19 +58,16 @@ import com.pivotal.gemfirexd.internal.iapi.services.monitor.Monitor; import com.pivotal.gemfirexd.internal.iapi.sql.Activation; import com.pivotal.gemfirexd.internal.iapi.sql.dictionary.SchemaDescriptor; -import com.pivotal.gemfirexd.internal.iapi.util.StringUtil; import com.pivotal.gemfirexd.internal.impl.jdbc.EmbedStatement; import com.pivotal.gemfirexd.internal.impl.jdbc.authentication.AuthenticationServiceBase; import io.snappydata.app.TestThrift; import io.snappydata.test.dunit.Host; import io.snappydata.test.dunit.RMIException; -import io.snappydata.test.dunit.SerializableCallable; import io.snappydata.test.dunit.SerializableRunnable; import io.snappydata.test.dunit.VM; import io.snappydata.test.util.TestException; import io.snappydata.thrift.internal.ClientConnection; import org.apache.derby.drda.NetworkServerControl; -import org.apache.derbyTesting.junit.JDBC; /** * Test that client and server are being correctly configured with different @@ -86,144 +77,12 @@ * @since 6.0 */ @SuppressWarnings("serial") -public class ClientServerDUnit extends DistributedSQLTestBase { +public class ClientServerDUnit extends ClientServerTestBase { public ClientServerDUnit(String name) { super(name); } - @Override - public void tearDown2() throws Exception { - super.tearDown2(); - // delete the top-level datadictionary created by some tests in this suite - File dir = new File("datadictionary"); - boolean result = TestUtil.deleteDir(dir); - TestUtil.getLogger().info( - "For Test: " + getClassName() + ":" + getTestName() - + " found and deleted stray datadictionarydir at: " - + dir.toString() + " : " + result); - } - - private RegionAttributesCreation getServerTestTableProperties() { - - // Create a set of expected region attributes for the table - RegionAttributesCreation serverAttrs = new RegionAttributesCreation(); - serverAttrs.setDataPolicy(DataPolicy.PARTITION); - serverAttrs.setConcurrencyChecksEnabled(false); - PartitionAttributes pa = new PartitionAttributesFactory() - .setPartitionResolver(new GfxdPartitionByExpressionResolver()).create(); - serverAttrs.setPartitionAttributes(pa); - serverAttrs.setInitialCapacity(GfxdConstants.DEFAULT_INITIAL_CAPACITY); - serverAttrs.setAllHasFields(true); - serverAttrs.setHasScope(false); - serverAttrs.setHasDiskDirs(false); - serverAttrs.setHasDiskWriteAttributes(false); - - return serverAttrs; - } - - private RegionAttributesCreation[] checkTestTableProperties(String schemaName) - throws Exception { - return checkTestTableProperties(schemaName, false); - } - - private RegionAttributesCreation[] checkTestTableProperties(String schemaName, - boolean isDataStore) throws Exception { - - RegionAttributesCreation serverAttrs = getServerTestTableProperties(); - - if (schemaName == null) { - schemaName = SchemaDescriptor.STD_DEFAULT_SCHEMA_NAME; - } - // Check the table attributes on the servers and the client - serverVerifyRegionProperties(1, schemaName, "TESTTABLE", serverAttrs); - serverVerifyRegionProperties(2, schemaName, "TESTTABLE", serverAttrs); - - // Check that the local-max-memory PR attribute is zero on the client - 
RegionAttributesCreation clientAttrs = new RegionAttributesCreation( - serverAttrs, false); - final PartitionAttributes pa; - if (isDataStore) { - pa = new PartitionAttributesFactory(clientAttrs - .getPartitionAttributes()).setLocalMaxMemory( - PartitionAttributesFactory.LOCAL_MAX_MEMORY_DEFAULT).create(); - } else { - pa = new PartitionAttributesFactory(clientAttrs - .getPartitionAttributes()).setLocalMaxMemory(0).create(); - } - clientAttrs.setPartitionAttributes(pa); - TestUtil.verifyRegionProperties(schemaName, "TESTTABLE", TestUtil - .regionAttributesToXML(clientAttrs)); - return new RegionAttributesCreation[] { serverAttrs, clientAttrs }; - } - - private void checkVMsDown(VM... vms) { - SerializableRunnable noGFE = new SerializableRunnable("GFE down") { - @Override - public void run() throws CacheException { - try { - CacheFactory.getAnyInstance(); - fail("expected the cache to be closed"); - } catch (CacheClosedException ex) { - // expected - } - DistributedSystem sys = InternalDistributedSystem - .getConnectedInstance(); - assertNull("expected the distributed system to be down", sys); - } - }; - for (VM vm : vms) { - if (vm == null) { - noGFE.run(); - } - else { - vm.invoke(noGFE); - } - } - } - - // Try some metadata calls - private void checkDBMetadata(Connection conn, String... urls) throws SQLException { - DatabaseMetaData dbmd = conn.getMetaData(); - String actualUrl = dbmd.getURL(); - // remove any trailing slash - getLogWriter().info("Got DB " + dbmd.getDatabaseProductName() + ' ' - + dbmd.getDatabaseProductVersion() + " using URL " + actualUrl); - actualUrl = actualUrl.replaceFirst("/$", ""); - boolean foundMatch = false; - for (String url : urls) { - url = url.replaceFirst("/$", ""); - if (url.equals(actualUrl)) { - foundMatch = true; - break; - } - } - if (!foundMatch) { - fail("Expected one of the provided URLs " - + java.util.Arrays.toString(urls) + " to match " + actualUrl); - } - ResultSet rs = dbmd.getCatalogs(); - while (rs.next()) { - getLogWriter().info("Got DB catalog: " + rs.getString(1)); - } - rs.close(); - rs = dbmd.getSchemas(); - while (rs.next()) { - getLogWriter().info("Got DB schema: " + rs.getString(1) - + " in catalog=" + rs.getString(2)); - } - rs.close(); - rs = dbmd.getProcedures(null, null, null); - while (rs.next()) { - getLogWriter().info("Got Procedure " + rs.getString(3) + " in catalog=" - + rs.getString(1) + ", schema=" + rs.getString(2)); - } - rs.close(); - // also check for a few flags that are failing over network connection - assertTrue(dbmd.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY)); - assertTrue(dbmd.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY)); - } - // ----------------------- Tests in this class start below // Check that the client and server are correctly setup in default case of @@ -1966,164 +1825,6 @@ public void testNetworkClientFailoverWithCurrentSchemaSetting() throws Exception conn.close(); } - /** - * Test if client is successfully able to failover to secondary locators for - * control connection specified with "secodary-locators" property if the - * primary one is unavailable at the time of the first connection (#47486). 
- */ - public void testNetworkClientFailoverMultipleLocators() throws Throwable { - // start some locators and couple of servers - int locPort1 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - int locPort2 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - int locPort3 = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); - - final String address = SocketCreator.getLocalHost().getHostAddress(); - // try with both host:port and host[port] for client connection - String locators = address + '[' + locPort1 + "]," + address + '[' - + locPort2 + "]," + address + '[' + locPort3 + ']'; - - Properties props = new Properties(); - props.setProperty("locators", locators); - props.setProperty("mcast-port", "0"); - startLocatorVM(address, locPort1, null, props); - startLocatorVM(address, locPort2, null, props); - startLocatorVM(address, locPort3, null, props); - - // now a server and a peer client - startVMs(1, 1, 0, null, props); - - // start network servers on all members - final int netPort1 = startNetworkServer(1, null, null); - final int netPort2 = startNetworkServer(2, null, null); - final int netPort3 = startNetworkServer(3, null, null); - startNetworkServer(4, null, null); - - // now client connections using all locators, second and third locators and - // only third locator - final String secondaryLocators = address + ':' + netPort2 + ',' + address - + ':' + netPort3; - final String secondaryLocators2 = address + '[' + netPort2 + "]," + address - + '[' + netPort3 + ']'; - - Connection netConn1 = TestUtil.getNetConnection(address, netPort1, - ";secondary-locators=" + secondaryLocators, null); - Statement stmt = netConn1.createStatement(); - stmt.execute("create table T.TESTTABLE (ID int primary key, " - + "DESCRIPTION varchar(1024) not null) redundancy 1"); - - final AtomicInteger id = new AtomicInteger(1); - SerializableCallable testConn = new SerializableCallable() { - @Override - public Object call() { - /* - System.setProperty("gemfirexd.debug.true", "TraceClientHA"); - SanityManager.TraceClientHA = true; - */ - try { - final Connection netConn; - final Properties props; - final int currId = id.get(); - switch (currId % 4) { - case 1: - netConn = TestUtil.getNetConnection(address, netPort1, - ";secondary-locators=" + secondaryLocators2, null); - break; - case 2: - props = new Properties(); - props.setProperty("secondary-locators", secondaryLocators); - netConn = TestUtil.getNetConnection(address, netPort1, null, - props); - break; - case 3: - props = new Properties(); - props.setProperty("secondary-locators", secondaryLocators2); - netConn = TestUtil.getNetConnection(address, netPort1, null, - props); - break; - default: - netConn = TestUtil.getNetConnection(address, netPort1, - ";secondary-locators=" + secondaryLocators, null); - break; - } - Statement stmt = netConn.createStatement(); - assertEquals(1, stmt.executeUpdate("insert into T.TESTTABLE values (" - + currId + ",'DESC" + currId + "')")); - return id.incrementAndGet(); - } catch (Throwable t) { - return t; - } - } - }; - - // stop the locators and test for net connections from all JVMs - checkAndSetId(id, testConn.call()); - stopVMNum(-1); - checkAndSetId(id, testConn.call()); - - checkAndSetId(id, serverExecute(1, testConn)); - stopVMNum(-2); - checkAndSetId(id, serverExecute(1, testConn)); - - checkAndSetId(id, serverExecute(2, testConn)); - stopVMNum(-3); - checkAndSetId(id, serverExecute(2, testConn)); - - checkAndSetId(id, testConn.call()); - checkAndSetId(id, serverExecute(1, testConn)); - 
checkAndSetId(id, serverExecute(2, testConn)); - - // executing from new server must fail since no locator is up at this point - try { - checkAndSetId(id, serverExecute(4, testConn)); - fail("expected connection failure with no locator available"); - } catch (SQLException sqle) { - if (!"08006".equals(sqle.getSQLState()) - && !"X0Z01".equals(sqle.getSQLState()) - && !"40XD0".equals(sqle.getSQLState())) { - fail("unexpected exception", sqle); - } - } - - // finally verify the results - SerializableCallable verify = new SerializableCallable() { - @Override - public Object call() { - try { - final Connection netConn = TestUtil.getNetConnection(address, - netPort1, ";secondary-locators=" + secondaryLocators, null); - Statement stmt = netConn.createStatement(); - ResultSet rs = stmt.executeQuery("select * from T.TESTTABLE"); - final Object[][] expectedResults = new Object[][] { - new Object[] { 1, "DESC1" }, new Object[] { 2, "DESC2" }, - new Object[] { 3, "DESC3" }, new Object[] { 4, "DESC4" }, - new Object[] { 5, "DESC5" }, new Object[] { 6, "DESC6" }, - new Object[] { 7, "DESC7" }, new Object[] { 8, "DESC8" }, - new Object[] { 9, "DESC9" }, }; - JDBC.assertUnorderedResultSet(rs, expectedResults, false); - return id.get(); - } catch (Throwable t) { - return t; - } - } - }; - checkAndSetId(id, verify.call()); - checkAndSetId(id, serverExecute(1, verify)); - checkAndSetId(id, serverExecute(2, verify)); - } - - private void checkAndSetId(final AtomicInteger id, Object result) - throws Throwable { - if (result instanceof Integer) { - id.set((Integer)result); - } - else if (result instanceof Throwable) { - throw (Throwable)result; - } - else { - fail("unexpected result " + result); - } - } - /** * Test if multiple connections from network clients are load-balanced across * multiple servers using GFE's ServerLocator. Also check for the failover. 
@@ -2308,499 +2009,6 @@ public void testNetworkClientLoadBalancing() throws Exception { assertNumConnections(-5, -5, 2); } - public void testPersistentDD() throws Exception { - // Start one server - AsyncVM async1 = invokeStartServerVM(1, 0, null, null); - // Start a second server with DD persistence - Properties props = new Properties(); - props.setProperty(com.pivotal.gemfirexd.Attribute.SYS_PERSISTENT_DIR, "SYS"); - AsyncVM async2 = invokeStartServerVM(2, 0, null, props); - - // Start a client - startClientVMs(1, 0, null); - - // wait for servers to start - joinVMs(true, async1, async2); - - // Create a table - clientSQLExecute(1, "create table TESTTABLE (ID int not null, " - + "DESCRIPTION varchar(1024) not null)"); - - // Also try creating the same table with a different user name - String userName = "TesT1"; - // just in the very remote case this clashes with random user name - if (userName.equalsIgnoreCase(TestUtil.currentUserName)) { - userName = "TestT2"; - } - final String userSchemaName = StringUtil.SQLToUpperCase(userName); - executeForUser(userName, "create table TESTTABLE (ID int not null, " - + "DESCRIPTION varchar(1024) not null)"); - - final String schemaName = getCurrentDefaultSchemaName(); - // Check the region properties - RegionAttributesCreation[] expectedAttrs = checkTestTableProperties( - schemaName); - RegionAttributesCreation[] userExpectedAttrs = checkTestTableProperties( - userSchemaName); - - // Restart everything and recheck - stopVMNums(-1, -2, 1); - // verify that nothing is running - checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), - this.serverVMs.get(1)); - - async1 = restartServerVMAsync(1, 0, null, null); - async2 = restartServerVMAsync(2, 0, null, props); - joinVMs(false, async1, async2); - restartVMNums(1); - - // Check the region properties on server with DD persistence - serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); - serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - userExpectedAttrs[0]); - // Check that region exists on other server and client due to GII from - // persisted server - serverVerifyRegionProperties(2, schemaName, "TESTTABLE", expectedAttrs[0]); - serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", - userExpectedAttrs[0]); - clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); - clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - userExpectedAttrs[1]); - - // Also check that stale DD persisted data is overridden. 
- stopVMNums(-1, 1); - - serverSQLExecute(2, "drop table TESTTABLE"); - executeOnServerForUser(2, userName, "drop table testtable"); - serverSQLExecute(2, "create table TESTTABLE (ID int not null, " - + "DESCRIPTION varchar(1024) not null) replicate"); - executeOnServerForUser(2, userName, "create table TESTTABLE (ID int " - + "not null, DESCRIPTION varchar(1024) not null) replicate"); - - restartVMNums(1, -1); - - expectedAttrs[0] = new RegionAttributesCreation(); - expectedAttrs[0].setScope(Scope.DISTRIBUTED_ACK); - expectedAttrs[0].setDataPolicy(DataPolicy.REPLICATE); - expectedAttrs[0].setInitialCapacity(GfxdConstants.DEFAULT_INITIAL_CAPACITY); - expectedAttrs[0].setConcurrencyChecksEnabled(false); - expectedAttrs[0].setAllHasFields(true); - expectedAttrs[0].setHasDiskDirs(false); - expectedAttrs[0].setHasDiskWriteAttributes(false); - serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); - serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - expectedAttrs[0]); - serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", - expectedAttrs[0]); - serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); - - expectedAttrs[1] = new RegionAttributesCreation(expectedAttrs[0], false); - expectedAttrs[1].setDataPolicy(DataPolicy.EMPTY); - expectedAttrs[1].setConcurrencyChecksEnabled(false); - expectedAttrs[1].setHasDiskDirs(false); - expectedAttrs[1].setHasDiskWriteAttributes(false); - clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); - clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - expectedAttrs[1]); - - // Stop everything and check that new table properties are being persisted. - stopVMNums(-1, 1); - stopVMNums(-2); - // start persisted DD VM first using other VM's DD - joinVM(false, restartServerVMAsync(1, 0, null, props)); - props = new Properties(); - props.setProperty(com.pivotal.gemfirexd.Attribute.GFXD_PERSIST_DD, "true"); - async2 = restartServerVMAsync(2, 0, null, props); - joinVM(false, async2); - restartVMNums(1); - - // Check the region properties on server with DD persistence - serverVerifyRegionProperties(2, schemaName, "TESTTABLE", expectedAttrs[0]); - serverVerifyRegionProperties(2, userSchemaName, "TESTTABLE", - expectedAttrs[0]); - // Check that region exists on other server and client due to GII from - // persisted server - serverVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[0]); - serverVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - expectedAttrs[0]); - clientVerifyRegionProperties(1, schemaName, "TESTTABLE", expectedAttrs[1]); - clientVerifyRegionProperties(1, userSchemaName, "TESTTABLE", - expectedAttrs[1]); - - // Drop the table - clientSQLExecute(1, "drop table TESTTABLE"); - executeForUser(userName, "drop table testTable"); - } - - public void testInitialScripts() throws Exception { - String testsDir = TestUtil.getResourcesDir(); - - TestUtil.deletePersistentFiles = true; - // Start one server - AsyncVM async1 = invokeStartServerVM(1, 0, null, null); - // Start a second server with initial script - Properties props = new Properties(); - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript.sql"); - AsyncVM async2 = invokeStartServerVM(2, 0, null, props); - - // wait for servers to start - joinVMs(true, async1, async2); - - // Start a client - startVMs(1, 0); - - // check that regions have been created but with no data - String ckFile = testsDir + "/lib/checkQuery.xml"; - - sqlExecuteVerify(new int[] { 
1 }, new int[] { 1, 2 }, - "select cid, addr, tid from trade.customers", ckFile, "empty"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, qty, tid from trade.portfolio", ckFile, "empty"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " - + "trade.customers tc where tp.cid=tc.cid", ckFile, "empty"); - - // drop the tables before restart - clientSQLExecute(1, "drop table trade.portfolio"); - clientSQLExecute(1, "drop table trade.customers"); - - // Restart with both the initial SQL scripts - stopVMNums(1, -1, -2); - // verify that nothing is running - checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs - .get(1)); - - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript.sql," + testsDir - + "/lib/checkInitialScript2.sql"); - - async2 = restartServerVMAsync(2, 0, null, props); - async1 = restartServerVMAsync(1, 0, null, null); - restartVMNums(1); - joinVMs(false, async1, async2); - - // check that data has been correctly populated - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " - + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); - - // Restart and check failure with SQL scripts in incorrect order - stopVMNums(-2); - - // drop the tables before restart - clientSQLExecute(1, "drop table trade.portfolio"); - clientSQLExecute(1, "drop table trade.customers"); - - stopVMNums(1, -1); - - // verify that nothing is running - checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs - .get(1)); - - restartVMNums(-1, 1); - - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript2.sql," + testsDir - + "/lib/checkInitialScript.sql"); - try { - joinVM(false, restartServerVMAsync(2, 0, null, props)); - fail("Expected an SQLException while starting the VM."); - } catch (RMIException ex) { - if (ex.getCause() instanceof SQLException) { - SQLException sqlEx = (SQLException)ex.getCause(); - if (!"XJ040".equals(sqlEx.getSQLState())) { - throw ex; - } - else { - // Explicitly delete the newly timestamped persistent file. 
- this.serverVMs.get(1).invoke(DistributedSQLTestBase.class, - "deleteDataDictionaryDir"); - } - } - else { - throw ex; - } - } - // verify that failed server is not running - checkVMsDown(this.serverVMs.get(1)); - - // Restart everything and check that init script fails - // with already existing table - stopVMNums(-1, 1); - // verify that nothing is running - checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs - .get(1)); - - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript.sql"); - async2 = restartServerVMAsync(2, 0, null, props); - restartVMNums(1); - joinVM(false, async2); - - addExpectedException(new int[] { 1 }, new int[] { 2 }, SQLException.class); - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript.sql," + testsDir - + "/lib/checkInitialScript2.sql"); - try { - joinVM(false, restartServerVMAsync(1, 0, null, props)); - fail("Expected an SQLException while starting the VM."); - } catch (RMIException ex) { - if (ex.getCause() instanceof SQLException) { - SQLException sqlEx = (SQLException)ex.getCause(); - if (!"XJ040".equals(sqlEx.getSQLState())) { - throw ex; - } - else { - // Explicitly delete the newly timestamped persistent file. - this.serverVMs.get(0).invoke(DistributedSQLTestBase.class, - "deleteDataDictionaryDir"); - } - } - else { - throw ex; - } - } - // verify that failed server is not running - checkVMsDown(this.serverVMs.get(0)); - removeExpectedException(new int[] { 1 }, new int[] { 2 }, - SQLException.class); - - // Restart everything and check that init script succeeds - // with already existing table when loading only data - stopVMNums(1, -2); - // verify that nothing is running - checkVMsDown(this.clientVMs.get(0), this.serverVMs.get(0), this.serverVMs - .get(1)); - - restartVMNums(1, -2); - - addExpectedException(new int[] { 1 }, new int[] { 2 }, SQLException.class); - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript2.sql"); - joinVM(false, restartServerVMAsync(1, 0, null, props)); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " - + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); - - // Drop tables, start and recheck to see everything is in order. 
- serverSQLExecute(2, "drop table trade.portfolio"); - serverSQLExecute(2, "drop table trade.customers"); - stopVMNums(-1); - props.setProperty(com.pivotal.gemfirexd.Attribute.INIT_SCRIPTS, testsDir - + "/lib/checkInitialScript.sql," + testsDir - + "/lib/checkInitialScript2.sql"); - joinVM(false, restartServerVMAsync(1, 0, null, props)); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, addr, tid from trade.customers", ckFile, "dd_cust_insert"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select cid, qty, tid from trade.portfolio", ckFile, "is_port"); - sqlExecuteVerify(new int[] { 1 }, new int[] { 1, 2 }, - "select tc.cid, tp.tid, cust_name, availQty from trade.portfolio tp, " - + "trade.customers tc where tp.cid=tc.cid", ckFile, "is_cust_port"); - } - - - /** - * Bug#46682 test: do not allow a node to join the cluster if its locale is - * different from the cluster. - * The derby 'territory' property allows us to change the locale of the - * database. - */ - public void testStartUpWithLocaleSet() throws Exception { - stopAllVMs(); - Properties props = new Properties(); - // start some servers - startVMs(0, 1); - startVMs(0, 1); - startVMs(0, 1); - // now change the 'territory' (and hence the database locale) for the next - // VM - String locale1Str = new Locale("fr", "CA").toString(); - // choose a different locale than the one already set - if (!Locale.getDefault().toString().equals(locale1Str)) { - props.setProperty("territory", locale1Str); - } else { - props.setProperty("territory", new Locale("en", "GB").toString()); - } - try { - startVMs(0, 1, 0, null, props); - fail("This test should have failed with GemFireXDRuntimeException." - + "Locale of all nodes in the cluser needs to be same"); - } catch (RMIException e) { - if (!e.getCause().getCause().getMessage() - .startsWith("Locale should be same on all nodes in the cluster")) { - fail("Test failed with unexpected exception :", e); - } - } finally { - stopAllVMs(); - } - } - - /** - * Test for checking routing using server groups - * {@link ServerGroupUtils#onServerGroups}. 
- */ - public void testServerGroupsRouting() throws Exception { - // start some servers in different server groups and a client - AsyncVM async1 = invokeStartServerVM(1, 0, "SG1", null); - AsyncVM async2 = invokeStartServerVM(2, 0, "SG2", null); - AsyncVM async3 = invokeStartServerVM(3, 0, "SG1, SG3", null); - startClientVMs(1, 0, "SG2, SG4", null); - // wait for servers to start - joinVMs(true, async1, async2, async3); - - // register the function on all the VMs - SerializableRunnable registerFn = new SerializableRunnable( - "register function") { - @Override - public void run() throws CacheException { - FunctionService.registerFunction(new TestFunction()); - } - }; - serverExecute(1, registerFn); - serverExecute(2, registerFn); - serverExecute(3, registerFn); - clientExecute(1, registerFn); - - DistributedMember server1 = getMemberForVM(this.serverVMs.get(0)); - DistributedMember server2 = getMemberForVM(this.serverVMs.get(1)); - DistributedMember server3 = getMemberForVM(this.serverVMs.get(2)); - DistributedMember client1 = getMemberForVM(this.clientVMs.get(0)); - - List resultMembers = executeOnServerGroups("SG3"); - checkMembersEqual(resultMembers, server3); - - resultMembers = executeOnServerGroups("SG2"); - checkMembersEqual(resultMembers, server2, client1); - - resultMembers = executeOnServerGroups("SG4"); - checkMembersEqual(resultMembers, client1); - - resultMembers = executeOnServerGroups("SG2,SG1"); - checkMembersEqual(resultMembers, server1, server2, server3, client1); - - resultMembers = executeOnServerGroups("SG3, SG2"); - checkMembersEqual(resultMembers, server2, server3, client1); - - resultMembers = executeOnServerGroups("SG3, SG4"); - checkMembersEqual(resultMembers, server3, client1); - - // check for execution on all servers with no server groups - resultMembers = executeOnServerGroups(""); - checkMembersEqual(resultMembers, server1, server2, server3); - resultMembers = executeOnServerGroups(null); - checkMembersEqual(resultMembers, server1, server2, server3); - resultMembers = executeOnServerGroups(" "); - checkMembersEqual(resultMembers, server1, server2, server3); - - // check for exception in case of no members with given server groups - try { - resultMembers = executeOnServerGroups("SG5"); - fail("expected function exception"); - } catch (FunctionException ex) { - // ignore expected exception - } - - // check for null results too - GfxdListResultCollector gfxdRC = new GfxdListResultCollector(); - ResultCollector rc = ServerGroupUtils - .onServerGroups("SG1, SG2, SG3, SG4", false).withCollector(gfxdRC) - .execute(TestFunction.ID); - List result = (List)rc.getResult(); - assertEquals("expected number of results: 4", 4, result.size()); - for (Object res : result) { - assertNull("expected null result", res); - } - } - - /** - * Test for checking execution on server groups and nodes using GROUPS() and - * ID() builtin. 
- */ - public void testNodeAndServerGroupsExecution() throws Exception { - // start some servers in different server groups and a client - AsyncVM async1 = invokeStartServerVM(1, 0, "SG1", null); - AsyncVM async2 = invokeStartServerVM(2, 0, "SG2", null); - AsyncVM async3 = invokeStartServerVM(3, 0, "SG1, SG3", null); - startClientVMs(1, 0, "SG2, SG4", null); - // wait for servers to start - joinVMs(true, async1, async2, async3); - - // create a table and insert some rows - clientSQLExecute(1, "create table EMP.TESTTABLE (id int primary key, " - + "addr varchar(100))"); - final PreparedStatement pstmt = TestUtil.jdbcConn - .prepareStatement("insert into EMP.TESTTABLE values (?, ?)"); - for (int index = 1; index <= 100; ++index) { - pstmt.setInt(1, index); - pstmt.setString(2, "ADDR" + index); - pstmt.execute(); - } - - // now try execute of a query using GROUPS() - int numResults = 0; - final Statement stmt = TestUtil.jdbcConn.createStatement(); - ResultSet rs = stmt.executeQuery("select * from EMP.TESTTABLE " - + "where GROUPS() like 'SG1%'"); - while (rs.next()) { - ++numResults; - } - rs = stmt.executeQuery("select * from EMP.TESTTABLE " - + "where GROUPS() = 'SG2'"); - while (rs.next()) { - ++numResults; - } - assertEquals(100, numResults); - - numResults = 0; - rs = stmt.executeQuery("select * from EMP.TESTTABLE where GROUPS() like " - + "'%SG3' or 'SG2' = GROUPS() or GROUPS() = 'SG1'"); - while (rs.next()) { - ++numResults; - } - assertEquals(100, numResults); - - // check zero results for server groups with no servers - rs = stmt.executeQuery("select * from EMP.TESTTABLE " - + "where GROUPS() like '%SG4'"); - assertFalse(rs.next()); - rs = stmt.executeQuery("select * from EMP.TESTTABLE " + - "where GROUPS() = 'SG3'"); - assertFalse(rs.next()); - - // get the member IDs - DistributedMember server1 = getMemberForVM(this.serverVMs.get(0)); - DistributedMember server2 = getMemberForVM(this.serverVMs.get(1)); - DistributedMember server3 = getMemberForVM(this.serverVMs.get(2)); - DistributedMember client1 = getMemberForVM(this.clientVMs.get(0)); - - // query execution using DSID() builtin - numResults = 0; - rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '" - + server1.toString() + "' or DSID() = '" + server2.toString() - + "' or DSID() = '" + server3.toString() + "'"); - while (rs.next()) { - ++numResults; - } - assertEquals(100, numResults); - - // check zero results on client - rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '" - + client1.toString() + "'"); - assertFalse(rs.next()); - rs = stmt.executeQuery("select * from EMP.TESTTABLE where DSID() = '1'"); - assertFalse(rs.next()); - } - /** test for exception when there is no data member available */ public void test41320_41723() throws Exception { // start a client VM @@ -3299,52 +2507,6 @@ else if (throwSqlException.intValue() == 3) { } } - private void checkMembersEqual(List resultMembers, - DistributedMember... 
expectedMembers) { - assertEquals("expected number of results: " + expectedMembers.length, - expectedMembers.length, resultMembers.size()); - for (DistributedMember member : expectedMembers) { - assertTrue("expected to find VM in result: " + member, - resultMembers.contains(member)); - } - } - - private List executeOnServerGroups(String serverGroups) { - GfxdListResultCollector gfxdRC = new GfxdListResultCollector(); - ResultCollector rc = ServerGroupUtils - .onServerGroups(serverGroups, false).withArgs(Boolean.TRUE) - .withCollector(gfxdRC).execute(TestFunction.ID); - List result = (List)rc.getResult(); - return result; - } - - public static void executeForUser(String userName, String sql) - throws SQLException { - final Properties userProps = new Properties(); - userProps.setProperty(PartitionedRegion.rand.nextBoolean() - ? com.pivotal.gemfirexd.Attribute.USERNAME_ATTR - : com.pivotal.gemfirexd.Attribute.USERNAME_ALT_ATTR, userName); - userProps.setProperty(com.pivotal.gemfirexd.Attribute.PASSWORD_ATTR, userName); - final Connection userConn = TestUtil.getConnection(userProps); - userConn.createStatement().execute(sql); - } - - private void executeOnServerForUser(int serverNum, final String userName, - final String sql) throws Exception { - serverExecute(serverNum, new SerializableRunnable("executing " + sql - + " with userName " + userName) { - @Override - public void run() throws CacheException { - try { - executeForUser(userName, sql); - } catch (SQLException ex) { - throw new CacheException(ex) { - }; - } - } - }); - } - public static void procTest(Integer arg) { getGlobalLogger().info("Invoked procTest with arg: " + arg); } @@ -3375,15 +2537,6 @@ private Properties doSecuritySetup(final Properties props, return props; } - @Override - protected void setCommonProperties(Properties props, int mcastPort, - String serverGroups, Properties extraProps) { - super.setCommonProperties(props, mcastPort, serverGroups, extraProps); - if (props != null) { - props.setProperty(TestUtil.TEST_SKIP_DEFAULT_INITIAL_CAPACITY, "true"); - } - } - public static void waitForDerbyInitialization(NetworkServerControl server) throws InterruptedException { for (int tries = 1; tries <= 20; tries++) { @@ -3395,36 +2548,4 @@ public static void waitForDerbyInitialization(NetworkServerControl server) } } } - - private static final class TestFunction implements Function { - - private static final String ID = "ClientServerDUnit.TestFunction"; - - public void execute(FunctionContext context) { - Object args = context.getArguments(); - if (args instanceof Boolean && ((Boolean)args).booleanValue()) { - InternalDistributedMember myId = Misc.getGemFireCache().getMyId(); - context.getResultSender().lastResult(myId); - } - else { - context.getResultSender().lastResult(null); - } - } - - public String getId() { - return ID; - } - - public boolean hasResult() { - return true; - } - - public boolean optimizeForWrite() { - return false; - } - - public boolean isHA() { - return true; - } - } } diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerTestBase.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerTestBase.java new file mode 100644 index 000000000..337166fbb --- /dev/null +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ClientServerTestBase.java @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.pivotal.gemfirexd; + +import java.io.File; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; + +import com.gemstone.gemfire.cache.DataPolicy; +import com.gemstone.gemfire.cache.PartitionAttributes; +import com.gemstone.gemfire.cache.PartitionAttributesFactory; +import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation; +import com.pivotal.gemfirexd.internal.engine.GfxdConstants; +import com.pivotal.gemfirexd.internal.engine.ddl.resolver.GfxdPartitionByExpressionResolver; +import com.pivotal.gemfirexd.internal.iapi.sql.dictionary.SchemaDescriptor; + +abstract class ClientServerTestBase extends DistributedSQLTestBase { + + ClientServerTestBase(String name) { + super(name); + } + + @Override + public void tearDown2() throws Exception { + super.tearDown2(); + // delete the top-level datadictionary created by some tests in this suite + File dir = new File("datadictionary"); + boolean result = TestUtil.deleteDir(dir); + TestUtil.getLogger().info( + "For Test: " + getClassName() + ":" + getTestName() + + " found and deleted stray datadictionarydir at: " + + dir.toString() + " : " + result); + } + + @Override + protected void setCommonProperties(Properties props, int mcastPort, + String serverGroups, Properties extraProps) { + super.setCommonProperties(props, mcastPort, serverGroups, extraProps); + if (props != null) { + props.setProperty(TestUtil.TEST_SKIP_DEFAULT_INITIAL_CAPACITY, "true"); + } + } + + final void checkAndSetId(final AtomicInteger id, Object result) + throws Throwable { + if (result instanceof Integer) { + id.set((Integer)result); + } else if (result instanceof Throwable) { + throw (Throwable)result; + } else { + fail("unexpected result " + result); + } + } + + RegionAttributesCreation getServerTestTableProperties() { + // Create a set of expected region attributes for the table + RegionAttributesCreation serverAttrs = new RegionAttributesCreation(); + serverAttrs.setDataPolicy(DataPolicy.PARTITION); + serverAttrs.setConcurrencyChecksEnabled(false); + PartitionAttributes pa = new PartitionAttributesFactory<>() + .setPartitionResolver(new GfxdPartitionByExpressionResolver()).create(); + serverAttrs.setPartitionAttributes(pa); + serverAttrs.setInitialCapacity(GfxdConstants.DEFAULT_INITIAL_CAPACITY); + serverAttrs.setAllHasFields(true); + serverAttrs.setHasScope(false); + serverAttrs.setHasDiskDirs(false); + serverAttrs.setHasDiskWriteAttributes(false); + + return serverAttrs; + } + + RegionAttributesCreation[] checkTestTableProperties(String schemaName) + throws Exception { + return checkTestTableProperties(schemaName, false); + } + + RegionAttributesCreation[] checkTestTableProperties(String schemaName, + boolean isDataStore) throws Exception { + + RegionAttributesCreation serverAttrs = 
getServerTestTableProperties(); + + if (schemaName == null) { + schemaName = SchemaDescriptor.STD_DEFAULT_SCHEMA_NAME; + } + // Check the table attributes on the servers and the client + serverVerifyRegionProperties(1, schemaName, "TESTTABLE", serverAttrs); + serverVerifyRegionProperties(2, schemaName, "TESTTABLE", serverAttrs); + + // Check that the local-max-memory PR attribute is zero on the client + RegionAttributesCreation clientAttrs = new RegionAttributesCreation( + serverAttrs, false); + final PartitionAttributes pa; + if (isDataStore) { + pa = new PartitionAttributesFactory<>(clientAttrs + .getPartitionAttributes()).setLocalMaxMemory( + PartitionAttributesFactory.LOCAL_MAX_MEMORY_DEFAULT).create(); + } else { + pa = new PartitionAttributesFactory<>(clientAttrs + .getPartitionAttributes()).setLocalMaxMemory(0).create(); + } + clientAttrs.setPartitionAttributes(pa); + TestUtil.verifyRegionProperties(schemaName, "TESTTABLE", TestUtil + .regionAttributesToXML(clientAttrs)); + return new RegionAttributesCreation[]{serverAttrs, clientAttrs}; + } + + // Try some metadata calls + void checkDBMetadata(Connection conn, String... urls) throws SQLException { + DatabaseMetaData dbmd = conn.getMetaData(); + String actualUrl = dbmd.getURL(); + // remove any trailing slash + getLogWriter().info("Got DB " + dbmd.getDatabaseProductName() + ' ' + + dbmd.getDatabaseProductVersion() + " using URL " + actualUrl); + actualUrl = actualUrl.replaceFirst("/$", ""); + boolean foundMatch = false; + for (String url : urls) { + url = url.replaceFirst("/$", ""); + if (url.equals(actualUrl)) { + foundMatch = true; + break; + } + } + if (!foundMatch) { + fail("Expected one of the provided URLs " + + java.util.Arrays.toString(urls) + " to match " + actualUrl); + } + ResultSet rs = dbmd.getCatalogs(); + while (rs.next()) { + getLogWriter().info("Got DB catalog: " + rs.getString(1)); + } + rs.close(); + rs = dbmd.getSchemas(); + while (rs.next()) { + getLogWriter().info("Got DB schema: " + rs.getString(1) + + " in catalog=" + rs.getString(2)); + } + rs.close(); + rs = dbmd.getProcedures(null, null, null); + while (rs.next()) { + getLogWriter().info("Got Procedure " + rs.getString(3) + " in catalog=" + + rs.getString(1) + ", schema=" + rs.getString(2)); + } + rs.close(); + // also check for a few flags that are failing over network connection + assertTrue(dbmd.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY)); + assertTrue(dbmd.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY)); + } +} diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/DistributedSQLTestBase.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/DistributedSQLTestBase.java index a333c9918..c043db89f 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/DistributedSQLTestBase.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/DistributedSQLTestBase.java @@ -299,8 +299,19 @@ protected static String getDUnitLocatorString() { return "localhost[" + getDUnitLocatorPort() + ']'; } - protected void baseSetUp() throws Exception { - super.setUp(); + public static void resetConnection() throws SQLException { + Connection conn = TestUtil.jdbcConn; + if (conn != null) { + try { + conn.rollback(); + conn.close(); + } catch (SQLException ignored) { + } + TestUtil.jdbcConn = null; + } + } + + protected void commonSetUp() throws Exception { GemFireXDUtils.IS_TEST_MODE = true; expectedDerbyExceptions.clear(); @@ -321,18 +332,24 @@ protected void baseSetUp() throws Exception { setLogFile(this.getClass().getName(), 
this.getName(), numVMs); invokeInEveryVM(this.getClass(), "setLogFile", new Object[] { this.getClass().getName(), this.getName(), numVMs }); + } + protected void baseSetUp() throws Exception { + super.setUp(); + commonSetUp(); // reduce logging if test so requests String logLevel; if ((logLevel = reduceLogging()) != null) { reduceLogLevelForTest(logLevel); } - IndexPersistenceDUnit.deleteAllOplogFiles(); + resetConnection(); + invokeInEveryVM(DistributedSQLTestBase.class, "resetConnection"); } @Override public void setUp() throws Exception { baseSetUp(); + IndexPersistenceDUnit.deleteAllOplogFiles(); } protected void reduceLogLevelForTest(String logLevel) { diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/dataawareprocedure/ListAggDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/dataawareprocedure/ListAggDUnit.java index 7124adf0d..45b162cbb 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/dataawareprocedure/ListAggDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/dataawareprocedure/ListAggDUnit.java @@ -46,15 +46,15 @@ public ListAggDUnit(String name) { static volatile Exception exceptiongot = null; - protected String reduceLogLevel() { + @Override + protected String reduceLogging() { // these tests generate lots of logs, so reducing them return "config"; } public void testDummy() { - } - + public void _testListAggUnderLoad() throws Exception { startVMs(1, 4); startNetworkServer(1, null, null); diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/AlterHDFSStoreDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/AlterHDFSStoreDUnit.java index 63a4114ba..28e3723da 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/AlterHDFSStoreDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/AlterHDFSStoreDUnit.java @@ -470,7 +470,8 @@ public void run() { } catch (Exception e) { getLogWriter().warn("EXCEPTION " + e); } - assertTrue(ddlconflatables.size() == 3); + assertEquals("Unexpected DDLs: " + ddlconflatables, + 3, ddlconflatables.size()); assertTrue(ddlconflatables.get(0).getValueToConflate().startsWith("create hdfsstore")); assertTrue(ddlconflatables.get(1).getValueToConflate().startsWith("create table")); assertTrue(ddlconflatables.get(2).getValueToConflate().startsWith("alter hdfsstore")); diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/BugsDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/BugsDUnit.java index 40e04e9f9..9d96951d2 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/BugsDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/BugsDUnit.java @@ -452,6 +452,9 @@ public void run() { assertEquals(10000, rs.getInt(1)); assertFalse(rs.next()); + // drop the explicitly created app user + sysSt.execute("call sys.drop_user('app')"); + sysConn.close(); conn.close(); } catch (Throwable t) { diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStore2DUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStore2DUnit.java new file mode 100644 index 000000000..45f711522 --- /dev/null +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStore2DUnit.java @@ -0,0 +1,699 @@ +/* + * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. 
You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.pivotal.gemfirexd.ddl; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl; +import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl; +import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer; +import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer; +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; +import com.gemstone.gemfire.internal.cache.PartitionedRegion; +import com.pivotal.gemfirexd.DistributedSQLTestBase; +import com.pivotal.gemfirexd.TestUtil; +import com.pivotal.gemfirexd.internal.engine.Misc; +import io.snappydata.test.dunit.SerializableCallable; +import io.snappydata.test.dunit.SerializableRunnable; +import org.apache.derbyTesting.junit.JDBC; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; + +public class CreateHDFSStore2DUnit extends DistributedSQLTestBase { + + public CreateHDFSStore2DUnit(String name) { + super(name); + } + + /** + * use the old value of HLL_CONSTANT to write some hoplogs. 
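+ * (interpretive note, inferred from the test body below rather than stated in the original javadoc: the HLL constant controls the accuracy of count_estimate(); this test lowers it + * from 0.1 to 0.03 across a restart and expects the ensuing major compaction to bring the estimate within a few percent of the actual row count)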
+ * stop the servers, upgrade the value of HLL_CONSTANT and make + * sure that a major compaction automatically runs and validate + * the count_estimate() + */ + public void testUpgradeHLLConstant() throws Exception { + dotestUpgradeHLLConstant(false); + } + + /** + * test that major compaction occurs even when there is only + * one major compacted file + */ + public void testUpgradeHLLConstantOneHoplog() throws Exception { + dotestUpgradeHLLConstant(true); + } + + private void dotestUpgradeHLLConstant(final boolean compactBeforeShutdown) throws Exception { + // Start one client and two servers + // use the old HLL_CONSTANT + invokeInEveryVM(new SerializableCallable() { + @Override + public Object call() throws Exception { + HdfsSortedOplogOrganizer.HLL_CONSTANT = 0.1; + return null; + } + }); + + startVMs(1, 2); + + final File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + Connection conn = TestUtil.getConnection(); + Statement st = conn.createStatement(); + ResultSet rs; + checkDirExistence(homeDir); + assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); + + st.execute("create schema hdfs"); + st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "' BatchTimeInterval 1000 milliseconds "); + st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" + + " by primary key buckets 2 persistent hdfsstore (myhdfs)"); + + for (int i = 1; i < 150; i++) { + st.execute("insert into hdfs.m1 values (" + i + ", " + i * 10 + ")"); + if (i % 10 == 0) { + // flush 10 ops in each file + String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); + st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + } + } + + //make sure data is written to HDFS + String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); + st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + + if (compactBeforeShutdown) { + st.execute("call SYS.HDFS_FORCE_COMPACTION('hdfs.m1', 0)"); + assertEquals(2, getNumberOfMajorCompactedFiles(homeDir)); // one per bucket + } + + //shutdown and restart + stopAllVMs(); + + // update the HLL_CONSTANT + invokeInEveryVM(new SerializableCallable() { + @Override + public Object call() throws Exception { + HdfsSortedOplogOrganizer.HLL_CONSTANT = 0.03; + return null; + } + }); + + long timeBeforeRestart = System.currentTimeMillis(); + + restartVMNums(-1, -2); + restartVMNums(1); + + // wait for the compaction to complete + waitForCriterion(new WaitCriterion() { + @Override + public boolean done() { + return getNumberOfMajorCompactedFiles(homeDir) == 2; // one per bucket + } + + @Override + public String description() { + return "expected 2 major compacted files, found " + getNumberOfMajorCompactedFiles(homeDir); + } + }, 30 * 1000, 1000, true); + + conn = TestUtil.getConnection(); + st = conn.createStatement(); + + assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); + rs = st.getResultSet(); + rs.next(); + assertTrue(rs.getTimestamp(1).getTime() >= timeBeforeRestart); + + st.execute("values COUNT_ESTIMATE('hdfs.m1')"); + rs = st.getResultSet(); + int count = 0; + while (rs.next()) { + count++; + assertTrue("estimate:" + rs.getLong(1), Math.abs(rs.getLong(1) - 150) < 6); //3.25% error + } + assertEquals(1, count); + st.execute("drop table hdfs.m1"); + st.execute("drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + public void testCount() throws Exception { + // Start one client and three servers + startVMs(1, 3); + + final 
File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + Connection conn = TestUtil.getConnection(); + Statement st = conn.createStatement(); + ResultSet rs; + + checkDirExistence(homeDir); + st.execute("create schema hdfs"); + st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "' BatchTimeInterval 100 milliseconds"); + st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition by primary key redundancy 1 hdfsstore (myhdfs)"); + + for (int i = 0; i < 300; i++) { + st.execute("insert into hdfs.m1 values (" + i + ", " + i * 10 + ")"); + } + + //make sure data is written to HDFS + String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); + st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + + for (int i = 300; i < 600; i++) { + st.execute("insert into hdfs.m1 values (" + i + ", " + i * 10 + ")"); + } + + //make sure data is written to HDFS + st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + + st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); + rs = st.getResultSet(); + rs.next(); + assertEquals(600, rs.getInt(1)); + + //shutdown and restart + stopAllVMs(); + restartVMNums(-1, -2, -3); + restartVMNums(1); + + conn = TestUtil.getConnection(); + st = conn.createStatement(); + st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); + rs = st.getResultSet(); + int count = 0; + while (rs.next()) { + count++; + assertEquals(600, rs.getLong(1)); + } + assertEquals(1, count); + + stopVMNum(-1); + Thread.sleep(3000); + st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); + rs = st.getResultSet(); + count = 0; + while (rs.next()) { + count++; + assertEquals(600, rs.getLong(1)); + } + assertEquals(1, count); + + st.execute("drop table hdfs.m1"); + st.execute("drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + public void testForceCompact() throws Exception { + doForceCompact(false); + } + + public void testSyncForceCompact() throws Exception { + doForceCompact(true); + } + + private void doForceCompact(final boolean isSynchronous) throws Exception { + // Start one client a two servers + startVMs(1, 2); + + final File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + Connection conn = TestUtil.getConnection(); + Statement st = conn.createStatement(); + + checkDirExistence(homeDir); + assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); + st.execute("create schema hdfs"); + st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "' BatchTimeInterval 1000 milliseconds"); + st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" + + " by primary key buckets 2 hdfsstore (myhdfs)"); + + // create hoplogs + for (int i = 1; i <= 120; i++) { + st.execute("insert into hdfs.m1 values(" + i + ", " + i * 10 + ")"); + if (i % 10 == 0) { + // flush 10 ops in each file + String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); + st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + } + } + assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); + + assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); + ResultSet rs = st.getResultSet(); + rs.next(); + assertEquals(0, rs.getTimestamp(1).getTime()); + long b4Compaction = System.currentTimeMillis(); + st.execute("call SYS.HDFS_FORCE_COMPACTION('hdfs.m1', " + 
(isSynchronous ? 0 : 1) + ")"); + if (isSynchronous) { + // for synchronous compaction also check the last major compaction time + assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); + rs = st.getResultSet(); + rs.next(); + assertTrue(rs.getTimestamp(1).getTime() >= b4Compaction); + } else { + // wait for the compaction to complete + waitForCriterion(new WaitCriterion() { + @Override + public boolean done() { + return getNumberOfMajorCompactedFiles(homeDir) == 2; // one per bucket + } + + @Override + public String description() { + return "expected 2 major compacted files, found " + + getNumberOfMajorCompactedFiles(homeDir); + } + }, 30 * 1000, 1000, true); + } + assertEquals(2, getNumberOfMajorCompactedFiles(homeDir)); + + st.execute("drop table hdfs.m1"); + st.execute("drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + public void testFlushQueue() throws Exception { + doFlushQueue(false, false); + } + + public void testFlushQueueColocate() throws Exception { + doFlushQueue(false, true); + } + + public void testFlushQueueWO() throws Exception { + doFlushQueue(true, false); + } + + private void doFlushQueue(boolean wo, boolean colo) throws Exception { + // Start one client a two servers + startVMs(1, 2); + + final File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + Connection conn = TestUtil.getConnection(); + Statement st = conn.createStatement(); + + checkDirExistence(homeDir); + st.execute("create schema hdfs"); + st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "' BatchTimeInterval 300000 milliseconds"); + st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" + + " by primary key buckets 2 hdfsstore (myhdfs) " + (wo ? 
"writeonly" : "")); + + // create queued entries + for (int i = 1; i <= 120; i++) { + st.execute("insert into hdfs.m1 values(" + i + ", " + i * 10 + ")"); + } + + if (!wo && colo) { + st.execute("create table hdfs.m2 (col1 int primary key , col2 int) partition" + + " by primary key colocate with (hdfs.m1) buckets 2 hdfsstore (myhdfs)"); + for (int i = 1; i <= 120; i++) { + st.execute("insert into hdfs.m2 values(" + i + ", " + i * 10 + ")"); + } + } + + // flush queue to hoplogs + st.execute("call SYS.HDFS_FLUSH_QUEUE('hdfs.m1', 30000)"); + + Runnable verify = new SerializableRunnable() { + @Override + public void run() { + waitForCriterion(new WaitCriterion() { + @Override + public boolean done() { + return getQueueSize() == 0; + } + + @Override + public String description() { + return "expected queue size == 0, found " + getQueueSize(); + } + + private int getQueueSize() { + return ((PartitionedRegion)Misc.getGemFireCache().getRegion("/HDFS/M1")) + .getHDFSEventQueueStats().getEventQueueSize(); + } + }, 30000, 1000, true); + } + }; + + serverExecute(1, verify); + serverExecute(2, verify); + + if (colo) { + st.execute("drop table hdfs.m2"); + } + st.execute("drop table hdfs.m1"); + st.execute("drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + public void testForceFileRollover() throws Exception { + // Start one client and two servers + startVMs(1, 2); + + final File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + checkDirExistence(homeDir); + clientSQLExecute(1, "create schema hdfs"); + clientSQLExecute(1, "create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "' BatchTimeInterval 1 milliseconds"); + clientSQLExecute(1, "create table hdfs.m1 (col1 int primary key , col2 int) partition" + + " by primary key hdfsstore (myhdfs) writeonly buckets 73"); + + for (int i = 1; i <= 200; i++) { + clientSQLExecute(1, "insert into hdfs.m1 values(" + i + ", " + i * 10 + ")"); + } + + // create a colocated table + serverSQLExecute(1, "create table hdfs.m2 (col1 int primary key , col2 int) partition" + + " by primary key colocate with (hdfs.m1) buckets 73 hdfsstore (myhdfs) writeonly"); + for (int i = 1; i <= 200; i++) { + serverSQLExecute(1, "insert into hdfs.m2 values(" + i + ", " + i * 10 + ")"); + } + + String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); + serverSQLExecute(1, "CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); + + serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M1")); + serverExecute(1, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M1")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M2")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M2")); + + // rollover files from server + serverSQLExecute(1, "call SYS.HDFS_FORCE_WRITEONLY_FILEROLLOVER('hdfs.m1', 0)"); + + // only files of single HDFS.M1 would be rolled over + serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); + serverExecute(1, verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M1")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M2")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M2")); + + // rollover files from client + clientSQLExecute(1, "call SYS.HDFS_FORCE_WRITEONLY_FILEROLLOVER('HDFS.M2', 0)"); + + // now files of HDFS.M2 would also be rolled over + serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); + serverExecute(1, 
verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M1")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); + serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M2")); + + clientSQLExecute(1, "drop table hdfs.m2"); + clientSQLExecute(1, "drop table hdfs.m1"); + clientSQLExecute(1, "drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + public void testBug48928() throws Exception { + startVMs(1, 2); + int netPort = startNetworkServer(2, null, null); + + Properties props = new Properties(); + props.put("skip-constraint-checks", "true"); + props.put("sync-commits", "true"); + Connection conn = TestUtil.getConnection(props); + Connection conn2 = TestUtil.getConnection(props); + runBug48928(conn, conn2); + + conn = TestUtil.getNetConnection(netPort, null, props); + conn2 = TestUtil.getConnection(props); + runBug48928(conn, conn2); + + conn = TestUtil.getNetConnection(netPort, null, props); + conn2 = TestUtil.getNetConnection(netPort, null, props); + runBug48928(conn, conn2); + + // also check with transactions + conn = TestUtil.getConnection(props); + conn2 = TestUtil.getNetConnection(netPort, null, props); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + conn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + runBug48928(conn, conn2); + + conn = TestUtil.getConnection(props); + conn2 = TestUtil.getConnection(props); + conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + conn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); + runBug48928(conn, conn2); + + conn = TestUtil.getNetConnection(netPort, null, props); + conn2 = TestUtil.getNetConnection(netPort, null, props); + conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + conn2.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); + runBug48928(conn, conn2); + } + + /** + * Test for bug 51516 + */ + public void testPeerClientWithUniqueConstraint() throws Exception { + + // Start one client and two servers + startVMs(0, 2); + + final File homeDirFile = new File(".", "myhdfs"); + final String homeDir = homeDirFile.getAbsolutePath(); + + Properties props = new Properties(); + props.setProperty("host-data", "false"); + props.setProperty("mcast-port", "0"); + props.setProperty("locators", getLocatorString()); + Connection conn = TestUtil.getConnection(props); + Statement st = conn.createStatement(); + + checkDirExistence(homeDir); + st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + + homeDir + "'"); + st.execute("create table app.m1 (col1 int, col2 int, col3 int, primary key (col1, col2, col3), constraint cus_uq unique (col1, col2)) persistent hdfsstore (myhdfs) partition by (col1)"); + + //Test violating the unique constraint + st.execute("insert into app.m1 values (11, 22, 33)"); + try { + st.execute("insert into app.m1 values (11, 22, 34)"); + fail("Should have seen a unique constraint violation"); + } catch (SQLException e) { + //Make sure we saw a unique constraint violation. 
+ if (!e.getSQLState().equals("23505")) { + throw e; + } + } + + //If the peer client has a PR but the datastores don't this will fail + st.execute("call sys.rebalance_all_buckets()"); + + st.execute("drop table app.m1"); + st.execute("drop hdfsstore myhdfs"); + delete(homeDirFile); + } + + private void runBug48928(final Connection conn, final Connection conn2) + throws Exception { + + ResultSet rs; + Statement st = conn.createStatement(); + st.execute("create table trade.securities (sec_id int primary key) " + + "partition by primary key"); + st.execute("create table trade.customers (cid int primary key, " + + "tid int, constraint cus_uq unique (tid)) " + + "partition by primary key"); + st.execute("create table trade.buyorders (oid int primary key, " + + "cid int, sid int, tid int, " + + "constraint bo_cust_fk foreign key (cid) " + + "references trade.customers (cid), " + + "constraint bo_sec_fk foreign key (sid) " + + "references trade.securities (sec_id), " + + "constraint bo_cust_fk2 foreign key (tid) " + + "references trade.customers (tid)) partition by primary key"); + st.execute("insert into trade.securities values (11)"); + + st.execute("insert into trade.customers values (12, 15)"); + st.execute("insert into trade.customers values (12, 16)"); + st.execute("insert into trade.customers values (13, 15)"); + + st.execute("insert into trade.buyorders values (1, 10, 14, 18)"); + st.execute("update trade.buyorders set cid = 24 where oid = 1"); + st.execute("update trade.buyorders set sid = 24 where cid = 24"); + st.execute("update trade.buyorders set tid = 28 where oid = 1"); + + st.execute("insert into trade.securities values (11)"); + conn.commit(); + + // verify results + st = conn2.createStatement(); + rs = st.executeQuery("select * from trade.securities"); + assertTrue(rs.next()); + assertEquals(11, rs.getInt(1)); + assertFalse(rs.next()); + + Object[][] expectedOutput = new Object[][]{new Object[]{12, 16}, + new Object[]{13, 15}}; + rs = st.executeQuery("select * from trade.customers"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + st.execute("delete from trade.customers where tid=15"); + expectedOutput = new Object[][]{new Object[]{12, 16}}; + rs = st.executeQuery("select * from trade.customers where tid=16"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.customers where cid=12"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.customers"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + + expectedOutput = new Object[][]{new Object[]{1, 10, 14, 18}}; + st.execute("insert into trade.buyorders values (1, 10, 14, 18)"); + rs = st.executeQuery("select * from trade.buyorders"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.buyorders where cid=10"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.buyorders where sid=14"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.buyorders where tid=18"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + + st.execute("put into trade.buyorders values (1, 10, 14, 18)"); + conn2.commit(); + rs = st.executeQuery("select * from trade.buyorders"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.buyorders where cid=10"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, 
false); + rs = st.executeQuery("select * from trade.buyorders where sid=14"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + rs = st.executeQuery("select * from trade.buyorders where tid=18"); + JDBC.assertUnorderedResultSet(rs, expectedOutput, false); + + conn2.commit(); + + st.execute("drop table trade.buyorders"); + st.execute("drop table trade.customers"); + st.execute("drop table trade.securities"); + + conn2.commit(); + } + + @SuppressWarnings("SameParameterValue") + private SerializableRunnable verifyExtensionCount(final String hdfsstore, + final String extension, final boolean nonzerocount, final String tablepath) throws Exception { + return new SerializableRunnable() { + @Override + public void run() { + try { + int extensionCount = getExtensionCount(); + if (nonzerocount) assertTrue(extensionCount > 0); + else assertEquals(extensionCount, 0); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private int getExtensionCount() throws Exception { + int counter = 0; + HDFSStoreImpl hdfsStore = GemFireCacheImpl.getInstance(). + findHDFSStore(hdfsstore); + FileSystem fs = hdfsStore.getFileSystem(); + try { + Path basePath = new Path(hdfsStore.getHomeDir() + "/" + tablepath); + + RemoteIterator files = fs.listFiles(basePath, true); + + while (files.hasNext()) { + LocatedFileStatus next = files.next(); + if (next.getPath().getName().endsWith(extension)) + counter++; + } + } catch (IOException e) { + e.printStackTrace(); + } + return counter; + } + }; + } + + private int getNumberOfMajorCompactedFiles(String path) { + File dir = new File(path); + if (!dir.exists()) { + return 0; + } + List expired = new ArrayList<>(); + getExpiredMarkers(dir, expired); + List majorCompacted = new ArrayList<>(); + getMajorCompactedFiles(dir, majorCompacted); + majorCompacted.removeIf(f -> + expired.contains(f + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)); + return majorCompacted.size(); + } + + private void getExpiredMarkers(File file, List expired) { + if (file.isFile()) { + if (!file.isHidden() && file.getName().endsWith( + AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) { + expired.add(file.getName()); + } + return; + } + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + getExpiredMarkers(f, expired); + } + } + } + + private void getMajorCompactedFiles(File file, List majorCompactedFiles) { + if (file.isFile()) { + if (!file.isHidden() && file.getName().endsWith( + AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION)) { + majorCompactedFiles.add(file.getName()); + } + return; + } + File[] files = file.listFiles(); + if (files != null) { + for (File f : files) { + getMajorCompactedFiles(f, majorCompactedFiles); + } + } + } + + // Assume no other thread creates the directory at the same time + private void checkDirExistence(String path) { + File dir = new File(path); + if (dir.exists()) { + delete(dir); + } + } +} diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStoreDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStoreDUnit.java index 670de572b..f7d2bca71 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStoreDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/CreateHDFSStoreDUnit.java @@ -25,8 +25,6 @@ import java.sql.Statement; import java.util.ArrayList; import java.util.Arrays; -import java.util.Iterator; -import java.util.List; import java.util.Properties; import java.util.Vector; @@ -35,24 +33,14 @@ import 
com.gemstone.gemfire.cache.RegionAttributes; import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreFactoryImpl; import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl; -import com.gemstone.gemfire.cache.hdfs.internal.hoplog.AbstractHoplogOrganizer; import com.gemstone.gemfire.cache.hdfs.internal.hoplog.DDLHoplogOrganizer; -import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer; import com.gemstone.gemfire.internal.AvailablePort; -import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; -import com.gemstone.gemfire.internal.cache.PartitionedRegion; import com.gemstone.gemfire.internal.util.BlobHelper; import com.pivotal.gemfirexd.DistributedSQLTestBase; import com.pivotal.gemfirexd.TestUtil; import com.pivotal.gemfirexd.internal.engine.Misc; import com.pivotal.gemfirexd.internal.engine.ddl.DDLConflatable; -import io.snappydata.test.dunit.SerializableCallable; import io.snappydata.test.dunit.SerializableRunnable; -import org.apache.derbyTesting.junit.JDBC; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; /** * @@ -926,560 +914,6 @@ public void testEstimateSize() throws Exception { delete(homeDirFile); } - /** - * use the old value of HLL_CONSTANT to write some hoplogs. - * stop the servers, upgrade the value of HLL_CONSTANT make - * sure that a major compaction automatically runs and validate - * the count_estimate() - */ - public void testUpgradeHLLConstant() throws Exception { - dotestUpgradeHLLConstant(false); - } - - /** - * test that major compaction occurs even when there is only - * one major compacted file - */ - public void testUpgradeHLLConstantOneHoplog() throws Exception { - dotestUpgradeHLLConstant(true); - } - - private void dotestUpgradeHLLConstant(final boolean compactBeforeShutdown) throws Exception { - // Start one client a two servers - // use the old HLL_CONSTANT - invokeInEveryVM(new SerializableCallable() { - @Override - public Object call() throws Exception { - HdfsSortedOplogOrganizer.HLL_CONSTANT = 0.1; - return null; - } - }); - - startVMs(1, 2); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - Connection conn = TestUtil.getConnection(); - Statement st = conn.createStatement(); - ResultSet rs = null; - checkDirExistence(homeDir); - assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); - - st.execute("create schema hdfs"); - st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "' BatchTimeInterval 1000 milliseconds "); - st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" - + " by primary key buckets 2 persistent hdfsstore (myhdfs)"); - - for (int i=1; i<150; i++) { - st.execute("insert into hdfs.m1 values ("+i+", "+i*10+")"); - if (i%10 == 0) { - // flush 10 ops in each file - String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); - st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - } - } - - //make sure data is written to HDFS - String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); - st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - - if (compactBeforeShutdown) { - st.execute("call SYS.HDFS_FORCE_COMPACTION('hdfs.m1', 0)"); - assertEquals(2, getNumberOfMajorCompactedFiles(homeDir)); // one per bucket - } - - //shutdown and restart - stopAllVMs(); - - // update the HLL_CONSTANT - invokeInEveryVM(new 
SerializableCallable() { - @Override - public Object call() throws Exception { - HdfsSortedOplogOrganizer.HLL_CONSTANT = 0.03; - return null; - } - }); - - long timeBeforeRestart = System.currentTimeMillis(); - - restartVMNums(-1, -2); - restartVMNums(1); - - // wait for the compaction to complete - waitForCriterion(new WaitCriterion() { - @Override - public boolean done() { - return getNumberOfMajorCompactedFiles(homeDir) == 2; // one per bucket - } - @Override - public String description() { - return "expected 2 major compacted files, found "+getNumberOfMajorCompactedFiles(homeDir); - } - }, 30*1000, 1000, true); - - conn = TestUtil.getConnection(); - st = conn.createStatement(); - - assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); - rs = st.getResultSet(); - rs.next(); - assertTrue(rs.getTimestamp(1).getTime() >= timeBeforeRestart); - - st.execute("values COUNT_ESTIMATE('hdfs.m1')"); - rs = st.getResultSet(); - int count = 0; - while (rs.next()) { - count++; - assertTrue("estimate:"+rs.getLong(1),Math.abs(rs.getLong(1) - 150) < 6); //3.25% error - } - assertEquals(1, count); - st.execute("drop table hdfs.m1"); - st.execute("drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - public void testCount() throws Exception { - // Start one client a three servers - startVMs(1, 3); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - Connection conn = TestUtil.getConnection(); - Statement st = conn.createStatement(); - ResultSet rs = null; - - checkDirExistence(homeDir); - st.execute("create schema hdfs"); - st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "' BatchTimeInterval 100 milliseconds"); - st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition by primary key redundancy 1 hdfsstore (myhdfs)"); - - for (int i=0; i<300; i++) { - st.execute("insert into hdfs.m1 values ("+i+", "+i*10+")"); - } - - //make sure data is written to HDFS - String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); - st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - - for (int i=300; i<600; i++) { - st.execute("insert into hdfs.m1 values ("+i+", "+i*10+")"); - } - - //make sure data is written to HDFS - st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - - st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); - rs = st.getResultSet(); - rs.next(); - assertEquals(600, rs.getInt(1)); - - //shutdown and restart - stopAllVMs(); - restartVMNums(-1, -2, -3); - restartVMNums(1); - - conn = TestUtil.getConnection(); - st = conn.createStatement(); - st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); - rs = st.getResultSet(); - int count = 0; - while (rs.next()) { - count++; - assertEquals(600, rs.getLong(1)); - } - assertEquals(1, count); - - stopVMNum(-1); - Thread.sleep(3000); - st.execute("select count(*) from hdfs.m1 -- GEMFIREXD-PROPERTIES queryHDFS=true \n"); - rs = st.getResultSet(); - count = 0; - while (rs.next()) { - count++; - assertEquals(600, rs.getLong(1)); - } - assertEquals(1, count); - - st.execute("drop table hdfs.m1"); - st.execute("drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - public void testForceCompact() throws Exception { - doForceCompact(false); - } - - public void testSyncForceCompact() throws Exception { - doForceCompact(true); - } - - private void doForceCompact(final boolean isSynchronous) throws Exception { - // 
Start one client a two servers - startVMs(1, 2); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - Connection conn = TestUtil.getConnection(); - Statement st = conn.createStatement(); - - checkDirExistence(homeDir); - assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); - st.execute("create schema hdfs"); - st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "' BatchTimeInterval 1000 milliseconds"); - st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" + - " by primary key buckets 2 hdfsstore (myhdfs)"); - - // create hoplogs - for (int i=1; i<=120; i++) { - st.execute("insert into hdfs.m1 values("+i+", "+i*10+")"); - if (i%10 == 0) { - // flush 10 ops in each file - String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); - st.execute("CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - } - } - assertEquals(0, getNumberOfMajorCompactedFiles(homeDir)); - - assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); - ResultSet rs = st.getResultSet(); - rs.next(); - assertEquals(0, rs.getTimestamp(1).getTime()); - long b4Compaction = System.currentTimeMillis(); - st.execute("call SYS.HDFS_FORCE_COMPACTION('hdfs.m1', "+(isSynchronous?0:1)+")"); - if (isSynchronous) { - // for synchronous compaction also check the last major compaction time - assertTrue(st.execute("values SYS.HDFS_LAST_MAJOR_COMPACTION('hdfs.m1')")); - rs = st.getResultSet(); - rs.next(); - assertTrue(rs.getTimestamp(1).getTime() >= b4Compaction); - } else { - // wait for the compaction to complete - waitForCriterion(new WaitCriterion() { - @Override - public boolean done() { - return getNumberOfMajorCompactedFiles(homeDir) == 2; // one per bucket - } - @Override - public String description() { - return "expected 2 major compacted files, found "+getNumberOfMajorCompactedFiles(homeDir); - } - }, 30*1000, 1000, true); - } - assertEquals(2, getNumberOfMajorCompactedFiles(homeDir)); - - st.execute("drop table hdfs.m1"); - st.execute("drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - public void testFlushQueue() throws Exception { - doFlushQueue(false, false); - } - - public void testFlushQueueColocate() throws Exception { - doFlushQueue(false, true); - } - - public void testFlushQueueWO() throws Exception { - doFlushQueue(true, false); - } - - private void doFlushQueue(boolean wo, boolean colo) throws Exception { - // Start one client a two servers - startVMs(1, 2); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - Connection conn = TestUtil.getConnection(); - Statement st = conn.createStatement(); - - checkDirExistence(homeDir); - st.execute("create schema hdfs"); - st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "' BatchTimeInterval 300000 milliseconds"); - st.execute("create table hdfs.m1 (col1 int primary key , col2 int) partition" + - " by primary key buckets 2 hdfsstore (myhdfs) " + (wo ? 
"writeonly" : "")); - - // create queued entries - for (int i=1; i<=120; i++) { - st.execute("insert into hdfs.m1 values("+i+", "+i*10+")"); - } - - if (!wo && colo) { - st.execute("create table hdfs.m2 (col1 int primary key , col2 int) partition" + - " by primary key colocate with (hdfs.m1) buckets 2 hdfsstore (myhdfs)"); - for (int i=1; i<=120; i++) { - st.execute("insert into hdfs.m2 values("+i+", "+i*10+")"); - } - } - - // flush queue to hoplogs - st.execute("call SYS.HDFS_FLUSH_QUEUE('hdfs.m1', 30000)"); - - Runnable verify = new SerializableRunnable() { - @Override - public void run() { - waitForCriterion(new WaitCriterion() { - @Override - public boolean done() { - return getQueueSize() == 0; - } - - @Override - public String description() { - return "expected queue size == 0, found " + getQueueSize(); - } - - private int getQueueSize() { - return ((PartitionedRegion) Misc.getGemFireCache().getRegion("/HDFS/M1")) - .getHDFSEventQueueStats().getEventQueueSize(); - } - }, 30000, 1000, true); - } - }; - - serverExecute(1, verify); - serverExecute(2, verify); - - if (colo) { - st.execute("drop table hdfs.m2"); - } - st.execute("drop table hdfs.m1"); - st.execute("drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - public void testForceFileRollover() throws Exception { - // Start one client and two servers - startVMs(1, 2); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - checkDirExistence(homeDir); - clientSQLExecute(1, "create schema hdfs"); - clientSQLExecute(1, "create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "' BatchTimeInterval 1 milliseconds"); - clientSQLExecute(1, "create table hdfs.m1 (col1 int primary key , col2 int) partition" + - " by primary key hdfsstore (myhdfs) writeonly buckets 73"); - - for (int i=1; i<=200; i++) { - clientSQLExecute(1, "insert into hdfs.m1 values("+i+", "+i*10+")"); - } - - // create a colocated table - serverSQLExecute(1,"create table hdfs.m2 (col1 int primary key , col2 int) partition" + - " by primary key colocate with (hdfs.m1) buckets 73 hdfsstore (myhdfs) writeonly"); - for (int i=1; i<=200; i++) { - serverSQLExecute(1,"insert into hdfs.m2 values("+i+", "+i*10+")"); - } - - String qname = HDFSStoreFactoryImpl.getEventQueueName("/HDFS/M1"); - serverSQLExecute(1, "CALL SYS.WAIT_FOR_SENDER_QUEUE_FLUSH('" + qname + "', 1, 0)"); - - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M1")); - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M1")); - serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M2")); - serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M2")); - - // rollover files from server - serverSQLExecute(1,"call SYS.HDFS_FORCE_WRITEONLY_FILEROLLOVER('hdfs.m1', 0)"); - - // only files of single HDFS.M1 would be rolled over - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M1")); - serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", true, "HDFS_M2")); - serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", false, "HDFS_M2")); - - // rollover files from client - clientSQLExecute(1,"call SYS.HDFS_FORCE_WRITEONLY_FILEROLLOVER('HDFS.M2', 0)"); - - // now files of HDFS.M2 would also be rolled over - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); - serverExecute(1, verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M1")); - 
serverExecute(2, verifyExtensionCount("MYHDFS", ".shop.tmp", false, "HDFS_M1")); - serverExecute(2, verifyExtensionCount("MYHDFS", ".shop", true, "HDFS_M2")); - - clientSQLExecute(1, "drop table hdfs.m2"); - clientSQLExecute(1, "drop table hdfs.m1"); - clientSQLExecute(1, "drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - public void testBug48928() throws Exception { - startVMs(1, 2); - int netPort = startNetworkServer(2, null, null); - - Properties props = new Properties(); - props.put("skip-constraint-checks", "true"); - props.put("sync-commits", "true"); - Connection conn = TestUtil.getConnection(props); - Connection conn2 = TestUtil.getConnection(props); - runBug48928(conn, conn2); - - conn = TestUtil.getNetConnection(netPort, null, props); - conn2 = TestUtil.getConnection(props); - runBug48928(conn, conn2); - - conn = TestUtil.getNetConnection(netPort, null, props); - conn2 = TestUtil.getNetConnection(netPort, null, props); - runBug48928(conn, conn2); - - // also check with transactions - conn = TestUtil.getConnection(props); - conn2 = TestUtil.getNetConnection(netPort, null, props); - conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - conn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - runBug48928(conn, conn2); - - conn = TestUtil.getConnection(props); - conn2 = TestUtil.getConnection(props); - conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - conn2.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); - runBug48928(conn, conn2); - - conn = TestUtil.getNetConnection(netPort, null, props); - conn2 = TestUtil.getNetConnection(netPort, null, props); - conn.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); - conn2.setTransactionIsolation(Connection.TRANSACTION_REPEATABLE_READ); - runBug48928(conn, conn2); - } - - /** - * Test for bug 51516 - * @throws Exception - */ - public void testPeerClientWithUniqueConstraint() throws Exception { - - // Start one client and two servers - startVMs(0, 2); - - final File homeDirFile = new File(".", "myhdfs"); - final String homeDir = homeDirFile.getAbsolutePath(); - - Properties props = new Properties(); - props.setProperty("host-data", "false"); - props.setProperty("mcast-port", "0"); - props.setProperty("locators", getLocatorString()); - Connection conn = TestUtil.getConnection(props); - Statement st = conn.createStatement(); - ResultSet rs = null; - - checkDirExistence(homeDir); - st.execute("create hdfsstore myhdfs namenode 'localhost' homedir '" + - homeDir + "'"); - st.execute("create table app.m1 (col1 int, col2 int, col3 int, primary key (col1, col2, col3), constraint cus_uq unique (col1, col2)) persistent hdfsstore (myhdfs) partition by (col1)"); - - //Test violating the unique constraint - st.execute("insert into app.m1 values (11, 22, 33)"); - try { - st.execute("insert into app.m1 values (11, 22, 34)"); - fail("Should have seen a unique constraint violation"); - } catch(SQLException e) { - //Make sure we saw a unique constraint violation. 
- if(!e.getSQLState().equals("23505")) { - throw e; - } - } - - //If the peer client has a PR but the datastores don't this will fail - st.execute("call sys.rebalance_all_buckets()"); - - st.execute("drop table app.m1"); - st.execute("drop hdfsstore myhdfs"); - delete(homeDirFile); - } - - private void runBug48928(final Connection conn, final Connection conn2) - throws Exception { - - ResultSet rs; - Statement st = conn.createStatement(); - st.execute("create table trade.securities (sec_id int primary key) " - + "partition by primary key"); - st.execute("create table trade.customers (cid int primary key, " - + "tid int, constraint cus_uq unique (tid)) " - + "partition by primary key"); - st.execute("create table trade.buyorders (oid int primary key, " - + "cid int, sid int, tid int, " - + "constraint bo_cust_fk foreign key (cid) " - + "references trade.customers (cid), " - + "constraint bo_sec_fk foreign key (sid) " - + "references trade.securities (sec_id), " - + "constraint bo_cust_fk2 foreign key (tid) " - + "references trade.customers (tid)) partition by primary key"); - st.execute("insert into trade.securities values (11)"); - - st.execute("insert into trade.customers values (12, 15)"); - st.execute("insert into trade.customers values (12, 16)"); - st.execute("insert into trade.customers values (13, 15)"); - - st.execute("insert into trade.buyorders values (1, 10, 14, 18)"); - st.execute("update trade.buyorders set cid = 24 where oid = 1"); - st.execute("update trade.buyorders set sid = 24 where cid = 24"); - st.execute("update trade.buyorders set tid = 28 where oid = 1"); - - st.execute("insert into trade.securities values (11)"); - conn.commit(); - - // verify results - st = conn2.createStatement(); - rs = st.executeQuery("select * from trade.securities"); - assertTrue(rs.next()); - assertEquals(11, rs.getInt(1)); - assertFalse(rs.next()); - - Object[][] expectedOutput = new Object[][] { new Object[] { 12, 16 }, - new Object[] { 13, 15 } }; - rs = st.executeQuery("select * from trade.customers"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - st.execute("delete from trade.customers where tid=15"); - expectedOutput = new Object[][] { new Object[] { 12, 16 } }; - rs = st.executeQuery("select * from trade.customers where tid=16"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.customers where cid=12"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.customers"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - - expectedOutput = new Object[][] { new Object[] { 1, 10, 14, 18 } }; - st.execute("insert into trade.buyorders values (1, 10, 14, 18)"); - rs = st.executeQuery("select * from trade.buyorders"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where cid=10"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where sid=14"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where tid=18"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - - st.execute("put into trade.buyorders values (1, 10, 14, 18)"); - conn2.commit(); - rs = st.executeQuery("select * from trade.buyorders"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where cid=10"); - JDBC.assertUnorderedResultSet(rs, 
expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where sid=14"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - rs = st.executeQuery("select * from trade.buyorders where tid=18"); - JDBC.assertUnorderedResultSet(rs, expectedOutput, false); - - conn2.commit(); - - st.execute("drop table trade.buyorders"); - st.execute("drop table trade.customers"); - st.execute("drop table trade.securities"); - - conn2.commit(); - } - private SerializableRunnable execute() throws Exception{ return new SerializableRunnable() { @Override @@ -1498,45 +932,6 @@ public void run() { } }; - } - private SerializableRunnable verifyExtensionCount(final String hdfsstore, - final String extension, final boolean nonzerocount, final String tablepath) throws Exception{ - return new SerializableRunnable() { - @Override - public void run() { - try { - int extensioncount = getExtensioncount(); - if (nonzerocount) - assertTrue( extensioncount > 0); - else - assertEquals(extensioncount , 0); - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - protected int getExtensioncount() throws Exception { - int counter =0 ; - HDFSStoreImpl hdfsStore = (HDFSStoreImpl) GemFireCacheImpl.getInstance(). - findHDFSStore(hdfsstore); - FileSystem fs = hdfsStore.getFileSystem(); - try { - Path basePath = new Path(hdfsStore.getHomeDir() + "/"+ tablepath); - - RemoteIterator files = fs.listFiles(basePath, true); - - while(files.hasNext()) { - LocatedFileStatus next = files.next(); - if (next.getPath().getName().endsWith(extension)) - counter++; - } - } catch (IOException e) { - e.printStackTrace(); - } - return counter; - } - }; - } private SerializableRunnable verifyDDLPersistence(final String name) { @@ -1551,8 +946,8 @@ public void run() { } catch (Exception e) { Misc.getGemFireCache().getLoggerI18n().fine("EXCEPTION " + e); } - - assertTrue(ddlconflatables.size() == 5); + assertEquals("Unexpected DDLs: " + ddlconflatables, + 5, ddlconflatables.size()); assertTrue(ddlconflatables.get(0).getValueToConflate().startsWith("create schema")); assertTrue(ddlconflatables.get(1).getValueToConflate().startsWith("create hdfsstore")); assertTrue(ddlconflatables.get(2).getValueToConflate().startsWith("create table")); @@ -1617,59 +1012,4 @@ private void checkDirExistence(String path) { delete(dir); } } - - private int getNumberOfMajorCompactedFiles(String path) { - File dir = new File(path); - if (!dir.exists()) { - return 0; - } - List expired = new ArrayList(); - getExpiredMarkers(dir, expired); - List majorCompacted = new ArrayList(); - getMajorCompactedFiles(dir, majorCompacted); - Iterator it = majorCompacted.iterator(); - while (it.hasNext()) { - String f = it.next(); - if (expired.contains(f+AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) { - it.remove(); - } - } - return majorCompacted.size(); - } - - private void getExpiredMarkers(File file, List expired) { - if (file.isFile()) { - if (!file.isHidden() && file.getName().endsWith(AbstractHoplogOrganizer.EXPIRED_HOPLOG_EXTENSION)) { - expired.add(file.getName()); - } - return; - } - File[] files = file.listFiles(); - if (files != null) { - for (File f : files) { - getExpiredMarkers(f, expired); - } - } - } - - private void getMajorCompactedFiles(File file, List majorCompactedFiles) { - if (file.isFile()) { - if (!file.isHidden() && file.getName().endsWith(AbstractHoplogOrganizer.MAJOR_HOPLOG_EXTENSION)) { - majorCompactedFiles.add(file.getName()); - } - return; - } - File[] files = file.listFiles(); - if(files 
!= null) { - for (File f : files) { - getMajorCompactedFiles(f, majorCompactedFiles); - } - } - } - - @Override - public void tearDown2() throws Exception { - super.tearDown2(); - } - } diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/IndexPersistenceDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/IndexPersistenceDUnit.java index 80094d8c1..14e3ebb4b 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/IndexPersistenceDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/ddl/IndexPersistenceDUnit.java @@ -325,7 +325,6 @@ public void testConstraintIndexes() throws Exception { new Object[] { GfxdConstants.TRACE_PERSIST_INDEX, "false" }); invokeInEveryVM(IndexPersistenceDUnit.class, "setSystemProperty", new Object[] { GfxdConstants.TRACE_PERSIST_INDEX_FINEST, "false" }); - stopAllVMs(); } } @@ -421,7 +420,6 @@ public void testNewIndexCreatedElseWhereWhenAMemberDown() throws Exception { } invokeInEveryVM(IndexPersistenceDUnit.class, "setSystemProperty", new Object[] { GfxdConstants.GFXD_PERSIST_INDEXES, "false" }); - stopAllVMs(); } } @@ -485,7 +483,6 @@ public void testIndexRe_Creation() throws Exception { } invokeInEveryVM(IndexPersistenceDUnit.class, "setSystemProperty", new Object[] { GfxdConstants.GFXD_PERSIST_INDEXES, "false" }); - stopAllVMs(); } } @@ -569,8 +566,7 @@ public String description() { } finally { invokeInEveryVM(IndexPersistenceDUnit.class, "setSystemProperty", new Object[] { GfxdConstants.GFXD_PERSIST_INDEXES, "false" }); - invokeInEveryVM(IndexPersistenceDUnit.class, "unsetTestOplogToTestForCompaction"); - stopAllVMs(); + invokeInEveryVM(IndexPersistenceDUnit.class, "unsetTestOplogToTestForCompaction"); } } @@ -827,7 +823,6 @@ private void verifySNAP1933(String persistIndexes) throws Exception { } finally { invokeInEveryVM(IndexPersistenceDUnit.class, "setSystemProperty", new Object[] {GfxdConstants.GFXD_PERSIST_INDEXES, "true"}); - stopAllVMs(); } } diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/engine/distributed/offheap/transactions/OffHeapTransactionDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/engine/distributed/offheap/transactions/OffHeapTransactionDUnit.java index 105af3746..76dd5f315 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/engine/distributed/offheap/transactions/OffHeapTransactionDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/engine/distributed/offheap/transactions/OffHeapTransactionDUnit.java @@ -18,25 +18,20 @@ import com.pivotal.gemfirexd.transactions.TransactionDUnit; -public class OffHeapTransactionDUnit extends TransactionDUnit{ - - +public class OffHeapTransactionDUnit extends TransactionDUnit { public OffHeapTransactionDUnit(String name) { - super(name); + super(name); } - + @Override public void setUp() throws Exception { + this.configureDefaultOffHeap(true); super.setUp(); - this.configureDefaultOffHeap(true); } - + @Override public String getSuffix() { - return " offheap "; + return " offheap "; } - - - } diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/tools/dataextractor/GemFireXDDataExtractorDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/tools/dataextractor/GemFireXDDataExtractorDUnit.java index 5eb93ff8e..3314b4cd6 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/tools/dataextractor/GemFireXDDataExtractorDUnit.java +++ 
b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/tools/dataextractor/GemFireXDDataExtractorDUnit.java @@ -83,10 +83,7 @@ public void tearDown2() throws Exception { stopAllVMs(); shutDownAll(); } - - public void testDummy() throws Exception { - } - + public void testHappyPathDataSalvager() throws Exception { String tableName = "REPLICATE_TABLE"; String server1Name = "server1"; diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/OffHeapTransaction2DUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/OffHeapTransaction2DUnit.java new file mode 100644 index 000000000..dc62ba1ea --- /dev/null +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/OffHeapTransaction2DUnit.java @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2017 SnappyData, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.pivotal.gemfirexd.internal.transactions; + +public class OffHeapTransaction2DUnit extends Transaction2DUnit { + + public OffHeapTransaction2DUnit(String name) { + super(name); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + this.configureDefaultOffHeap(true); + } + + @Override + public String getSuffix() { + return " offheap"; + } +} diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2DUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2DUnit.java new file mode 100644 index 000000000..21948214c --- /dev/null +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2DUnit.java @@ -0,0 +1,706 @@ +/* + * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ +/* + * Changes for SnappyData distributed computational and data platform. + * + * Portions Copyright (c) 2017 SnappyData, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.pivotal.gemfirexd.internal.transactions; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CyclicBarrier; + +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; +import com.gemstone.gemfire.internal.cache.RegionEntry; +import com.gemstone.gemfire.internal.cache.TXEntryState; +import com.gemstone.gemfire.internal.cache.TXManagerImpl; +import com.gemstone.gemfire.internal.cache.TXStateProxy; +import com.gemstone.gemfire.internal.cache.TransactionObserver; +import com.gemstone.gemfire.internal.cache.TransactionObserverAdapter; +import com.gemstone.gemfire.internal.cache.locks.ExclusiveSharedSynchronizer; +import com.pivotal.gemfirexd.Attribute; +import com.pivotal.gemfirexd.DistributedSQLTestBase; +import com.pivotal.gemfirexd.TestUtil; +import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserver; +import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverAdapter; +import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverHolder; +import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer; +import com.pivotal.gemfirexd.internal.shared.common.sanity.SanityManager; +import com.pivotal.gemfirexd.transactions.TransactionDUnit; +import io.snappydata.test.dunit.SerializableRunnable; +import io.snappydata.test.dunit.VM; + +@SuppressWarnings("MagicConstant") +public class Transaction2DUnit extends DistributedSQLTestBase { + + @SuppressWarnings("WeakerAccess") + public Transaction2DUnit(String name) { + super(name); + } + + protected int getIsolationLevel() { + return Connection.TRANSACTION_READ_COMMITTED; + } + + protected String getSuffix() { + return ""; + } + + public void testBug41694() throws Exception { + startServerVMs(2, 0, "sg1"); + startClientVMs(1, 0, null); + Connection conn = TestUtil.jdbcConn; + Statement st = conn.createStatement(); + st.execute("create schema test default server groups (sg1, sg2)"); + st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null, " + + "col3 int, col4 int, col5 varchar(10),col6 int, col7 int, col8 int, " + + "col9 int, col10 int, col11 int, col12 int, col13 int, col14 int, " + + "col15 int, col16 int, col17 int, col18 int, col19 int, col20 int, " + + "col21 int,col22 int, col23 int, col24 int, col25 int, col26 int, " + + "col27 int, col28 int, col29 int, col30 int, col31 int, col32 int," + + " col33 int, col34 int, col35 int, col36 int, col37 int, col38 int, " + + "col39 int, col40 int, col41 int, col42 int, col43 int, col44 int, " + + "col45 int, col46 int, col47 int, col48 int, col49 int, col50 int, " + + "col51 int, col52 int, col53 int, col54 int, col55 int, col56 int, " + + "col57 int, col58 int, col59 int, col60 int, col61 int, col62 int, " + + "col63 int, col64 int, col65 int, col66 int, col67 int, col68 int, " + + "col69 int, col70 int, col71 int, col72 int, col73 int, col74 int, " + + "col75 int, col76 int, col77 int, col78 int, col79 int, col80 int, " + + "col81 int, col82 int, col83 int, col84 int, col85 int, col86 int, " + + "col87 int, col88 int, col89 int, col90 int, col91 int, col92 int, " + + "col93 int, col94 int, col95 int, col96 int, 
col97 int, col98 int, " + + "col99 int, col100 int, Primary Key (PkCol1) ) " + + "Partition by Primary Key server groups (sg1) redundancy 1" + getSuffix()); + conn.commit(); + st.execute("create index IndexCol4 on test.t1 (col4)"); + conn.commit(); + + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + final int numRows = 1; + PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " + + "values(?, 1000, 1000, 1000, 'XXXX1'" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000 " + + " , 1000, 1000, 1000, 1000, 1000 " + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000" + + " , 1000, 1000, 1000, 1000, 1000 )"); + // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); + for (int i = 0; i < numRows; i++) { + psInsert.setInt(1, i); + psInsert.executeUpdate(); + conn.commit(); + } + + PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " + + "col3 = 20 where PkCol1=?"); + // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); + for (int i = 0; i < 1000; i++) { + // Update the same row over and over should not cause #41694, + // negative bucket size(memory consumed by bucket). + psUpdate.setInt(1, 0); + psUpdate.executeUpdate(); + conn.commit(); + } + + st.close(); + conn.commit(); + } + + public void testBug41873_1() throws Exception { + // Create the controller VM as client which belongs to default server group + startClientVMs(1, 0, null); + startServerVMs(2, -1, "SG1"); + Connection conn = TestUtil.jdbcConn; + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + // create table + clientSQLExecute(1, "Create table t1 (c1 int not null , c2 int not null, " + + "c3 int not null, c4 int not null) redundancy 1 " + + "partition by column (c1) " + getSuffix()); + conn.commit(); + Statement st = conn.createStatement(); + st.execute("insert into t1 values (1, 1,1,1)"); + st.execute("insert into t1 values (114, 114,114,114)"); + conn.commit(); + st.execute("update t1 set c2 =2 where c1 =1"); + st.execute("update t1 set c3 =3 where c1 =1"); + st.execute("update t1 set c4 =4 where c1 =1"); + st.execute("update t1 set c2 =3 where c1 = 114"); + st.execute("update t1 set c3 =4 where c1 =114"); + st.execute("update t1 set c4 =5 where c1 =114"); + conn.commit(); + ResultSet rs = st.executeQuery("Select * from t1 where c1 = 1"); + rs.next(); + assertEquals(1, rs.getInt(1)); + assertEquals(2, rs.getInt(2)); + assertEquals(3, rs.getInt(3)); + assertEquals(4, rs.getInt(4)); + + rs = st.executeQuery("Select * from t1 where c1 = 114"); + rs.next(); + assertEquals(114, rs.getInt(1)); + assertEquals(3, rs.getInt(2)); + assertEquals(4, rs.getInt(3)); + assertEquals(5, rs.getInt(4)); + conn.commit(); + } + + public void testBug42067_1() throws Exception { + // Create the controller VM as client which belongs to default server group + startClientVMs(1, 0, null); + startServerVMs(2, -1, "SG1"); + Connection conn = TestUtil.jdbcConn; + 
conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + // create table + clientSQLExecute(1, "Create table t1 (c1 int not null, " + + "c2 int not null, c3 int not null, c4 int not null) " + + "redundancy 1 partition by column (c1) " + getSuffix()); + conn.commit(); + Statement st = conn.createStatement(); + st.execute("insert into t1 values (1, 1,1,1)"); + st.execute("insert into t1 values (114, 114,114,114)"); + conn.commit(); + st.execute("delete from t1 where c1 =1 and c3 =1"); + st.execute("update t1 set c2 =2 where c1 =1 and c3 =1"); + conn.commit(); + } + + public void testBug42067_2() throws Exception { + // Create the controller VM as client which belongs to default server group + startClientVMs(1, 0, null); + startServerVMs(2, -1, "SG1"); + Connection conn = TestUtil.jdbcConn; + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + // create table + clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " + + "c2 int not null, c3 int not null, c4 int not null) " + + "redundancy 1 partition by column (c1) " + getSuffix()); + conn.commit(); + Statement st = conn.createStatement(); + st.execute("insert into t1 values (1, 1,1,1)"); + st.execute("insert into t1 values (114, 114,114,114)"); + conn.commit(); + st.execute("delete from t1 where c1 =1 and c3 =1"); + st.execute("update t1 set c2 =2 where c1 =1 and c3 =1"); + conn.commit(); + ResultSet rs = st.executeQuery("select * from t1"); + assertTrue(rs.next()); + assertEquals(114, rs.getInt(1)); + assertFalse(rs.next()); + } + + public void testBug41970_43473() throws Throwable { + startVMs(1, 1); + Connection conn = TestUtil.jdbcConn; + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + Statement st = conn.createStatement(); + st.execute("create table customers (cid int not null, cust_name " + + "varchar(100), addr varchar(100), tid int, primary key (cid))"); + st.execute("create table trades (tid int, cid int, eid int, primary Key " + + "(tid), foreign key (cid) references customers (cid))" + getSuffix()); + PreparedStatement pstmt = conn + .prepareStatement("insert into customers values(?,?,?,?)"); + pstmt.setInt(1, 1); + pstmt.setString(2, "name1"); + pstmt.setString(3, "add1"); + pstmt.setInt(4, 1); + pstmt.executeUpdate(); + pstmt.setInt(1, 2); + pstmt.setString(2, "name2"); + pstmt.setString(3, "add2"); + pstmt.setInt(4, 1); + pstmt.executeUpdate(); + conn.commit(); + + ResultSet rs = st.executeQuery("Select * from customers"); + int numRows = 0; + while (rs.next()) { + // Checking number of rows returned, since ordering of results + // is not guaranteed. 
+ numRows++; + } + assertEquals("ResultSet should contain two rows ", 2, numRows); + rs.close(); + conn.commit(); + + // test for #43473 + st.execute("create table sellorders (oid int not null primary key, " + + "cid int, order_time timestamp, status varchar(10), " + + "constraint ch check (status in ('cancelled', 'open', 'filled')))" + getSuffix()); + pstmt = conn.prepareStatement("insert into sellorders values (?, ?, ?, ?)"); + final long currentTime = System.currentTimeMillis(); + final Timestamp ts = new Timestamp(currentTime - 100); + final Timestamp now = new Timestamp(currentTime); + for (int id = 1; id <= 100; id++) { + pstmt.setInt(1, id); + pstmt.setInt(2, id * 2); + pstmt.setTimestamp(3, ts); + pstmt.setString(4, "open"); + pstmt.execute(); + } + conn.commit(); + + final CyclicBarrier barrier = new CyclicBarrier(2); + final Throwable[] failure = new Throwable[1]; + Thread t = new Thread(() -> { + try { + Connection conn2 = TestUtil.getConnection(); + conn2.setTransactionIsolation(getIsolationLevel()); + conn2.setAutoCommit(false); + PreparedStatement pstmt2 = conn2 + .prepareStatement("update sellorders set cid = ? where oid = ?"); + pstmt2.setInt(1, 7); + pstmt2.setInt(2, 3); + assertEquals(1, pstmt2.executeUpdate()); + pstmt2.setInt(1, 3); + pstmt2.setInt(2, 1); + assertEquals(1, pstmt2.executeUpdate()); + + // use a barrier to force txn1 to wait after first EX lock upgrade + // and txn2 to wait before EX_SH lock acquisition + getServerVM(1).invoke(Transaction2DUnit.class, "installObservers"); + barrier.await(); + conn2.commit(); + } catch (Throwable t1) { + failure[0] = t1; + } + }); + t.start(); + + pstmt = conn.prepareStatement("update sellorders " + + "set status = 'cancelled' where order_time < ? and status = 'open'"); + pstmt.setTimestamp(1, now); + barrier.await(); + try { + pstmt.executeUpdate(); + fail("expected conflict exception"); + } catch (SQLException sqle) { + if (!"X0Z02".equals(sqle.getSQLState())) { + throw sqle; + } + } + conn.close(); + + t.join(); + + if (failure[0] != null) { + throw failure[0]; + } + + // clear the observers + serverExecute(1, new SerializableRunnable() { + @Override + public void run() { + GemFireCacheImpl.getExisting().getTxManager().setObserver(null); + GemFireXDQueryObserverHolder.clearInstance(); + } + }); + } + + public void testBug42031IsolationAndTXData() throws Exception { + // Create the controller VM as client which belongs to default server group + startClientVMs(1, 0, null); + startServerVMs(1, -1, "SG1"); + // create table + clientSQLExecute(1, "create table TESTTABLE (ID int not null primary key, " + + "DESCRIPTION varchar(1024), ADDRESS varchar(1024), ID1 int)" + getSuffix()); + + Connection conn = TestUtil.jdbcConn; + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + Statement stmt = conn.createStatement(); + // Do an insert in sql fabric. 
This will create a primary bucket on the lone + // server VM + // with bucket ID =1 + stmt.executeUpdate("Insert into TESTTABLE values(114,'desc114','Add114',114)"); + + stmt.executeUpdate("Insert into TESTTABLE values(1,'desc1','Add1',1)"); + stmt.executeUpdate("Insert into TESTTABLE values(227,'desc227','Add227',227)"); + stmt.executeUpdate("Insert into TESTTABLE values(340,'desc340','Add340',340)"); + conn.rollback(); + stmt.executeUpdate("Insert into TESTTABLE values(114,'desc114','Add114',114)"); + stmt.executeUpdate("Insert into TESTTABLE values(2,'desc1','Add1',1)"); + stmt.executeUpdate("Insert into TESTTABLE values(224,'desc227','Add227',227)"); + stmt.executeUpdate("Insert into TESTTABLE values(331,'desc340','Add340',340)"); + conn.commit(); + // Bulk Update + stmt.executeUpdate("update TESTTABLE set ID1 = ID1 +1 "); + ResultSet rs = stmt.executeQuery("select ID1 from TESTTABLE"); + Set expected = new HashSet<>(); + expected.add(1); + expected.add(227); + expected.add(340); + expected.add(114); + Set expected2 = new HashSet<>(); + expected2.add(2); + expected2.add(228); + expected2.add(341); + expected2.add(115); + + int numRows = 0; + while (rs.next()) { + int got = rs.getInt(1); + assertTrue(expected2.contains(got)); + ++numRows; + } + assertEquals(expected2.size(), numRows); + + // rollback and check original values + conn.rollback(); + + rs = stmt.executeQuery("select ID1 from TESTTABLE"); + numRows = 0; + while (rs.next()) { + int got = rs.getInt(1); + assertTrue(expected.contains(got)); + ++numRows; + } + assertEquals(expected.size(), numRows); + + // now commit and check success + stmt.executeUpdate("update TESTTABLE set ID1 = ID1 +1 "); + rs = stmt.executeQuery("select ID1 from TESTTABLE"); + numRows = 0; + while (rs.next()) { + int got = rs.getInt(1); + assertTrue(expected2.contains(got)); + ++numRows; + } + assertEquals(expected2.size(), numRows); + + conn.commit(); + + rs = stmt.executeQuery("select ID1 from TESTTABLE"); + numRows = 0; + while (rs.next()) { + int got = rs.getInt(1); + assertTrue(expected2.contains(got)); + ++numRows; + } + assertEquals(expected2.size(), numRows); + } + + public void testIndexMaintenanceOnPrimaryAndSecondary() throws Exception { + startServerVMs(2, 0, "sg1"); + startClientVMs(1, 0, null); + Properties props = new Properties(); + props.setProperty(Attribute.TX_SYNC_COMMITS, "true"); + final Connection conn = TestUtil.getConnection(props); + Statement st = conn.createStatement(); + st.execute("create schema test default server groups (sg1, sg2)"); + st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int " + + "not null , col3 int, col4 int, col5 varchar(10), Primary Key(PkCol1)" + + ") Partition by Primary Key server groups (sg1) redundancy 1" + getSuffix()); + conn.commit(); + st.execute("create index IndexCol4 on test.t1 (col4)"); + conn.commit(); + + conn.setTransactionIsolation(getIsolationLevel()); + conn.setAutoCommit(false); + final int numRows = 10; + VM server1 = this.serverVMs.get(0); + VM server2 = this.serverVMs.get(1); + server1.invoke(TransactionDUnit.class, "installIndexObserver", + new Object[]{"test.IndexCol4", null}); + server2.invoke(TransactionDUnit.class, "installIndexObserver", + new Object[]{"test.IndexCol4", null}); + PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " + + "values(?, 10, 10, 10, 'XXXX1')"); + for (int i = 0; i < numRows; i++) { + psInsert.setInt(1, i); + psInsert.executeUpdate(); + conn.commit(); + } + + server1.invoke(TransactionDUnit.class, "checkIndexAndReset", 
+ new Object[]{Integer.valueOf(numRows), Integer.valueOf(0)}); + server2.invoke(TransactionDUnit.class, "checkIndexAndReset", + new Object[]{Integer.valueOf(numRows), Integer.valueOf(0)}); + + PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " + + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); + for (int i = 0; i < numRows; i++) { + psUpdate.setInt(1, i); + psUpdate.executeUpdate(); + conn.commit(); + } + + server1.invoke(TransactionDUnit.class, "checkIndexAndReset", new Object[]{ + Integer.valueOf(numRows * 2), Integer.valueOf(numRows)}); + server2.invoke(TransactionDUnit.class, "checkIndexAndReset", new Object[]{ + Integer.valueOf(numRows * 2), Integer.valueOf(numRows)}); + + server1.invoke(TransactionDUnit.class, "resetIndexObserver"); + server2.invoke(TransactionDUnit.class, "resetIndexObserver"); + + st.close(); + conn.close(); + } + + public void testNonColocatedInsertByPartitioning() throws Exception { + startServerVMs(1, 0, "sg1"); + startServerVMs(1, 0, "sg2"); + startClientVMs(1, 0, null); + // TestUtil.loadDriver(); + + Connection conn = TestUtil.jdbcConn; + System.out.println("XXXX the type of conneciton : " + conn); + Statement st = conn.createStatement(); + st.execute("create schema test default server groups (sg1, sg2)"); + st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " + + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1, PkCol2) ) " + + "Partition by column (PkCol1) server groups (sg1)" + getSuffix()); + + st.execute("create table test.t2 (PkCol1 int not null, PkCol2 int not null, " + + " col3 int, col4 varchar(10)) Partition by column (PkCol1)" + + " server groups (sg2)" + getSuffix()); + conn.commit(); + // conn.setTransactionIsolation(getIsolationLevel()); + st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); + st.execute("insert into test.t2 values(10, 10, 10, 'XXXX1')"); + conn.commit(); + } + + /** + * Test updates on tables partitioned by PK. 
+ */ + public void testTransactionalKeyBasedUpdatePartitionedByPk() throws Exception { + startServerVMs(2, 0, "sg1"); + startClientVMs(1, 0, null); + Connection conn = TestUtil.jdbcConn; + conn.setAutoCommit(false); + Statement st = conn.createStatement(); + st.execute("create schema test default server groups (sg1, sg2)"); + st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " + + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1) ) " + + "Partition by Primary Key server groups (sg1) redundancy 1" + getSuffix()); + + conn.setTransactionIsolation(getIsolationLevel()); + PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " + + "values(?, 10, 10, 10, 'XXXX1')"); + // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); + for (int i = 0; i < 1000; i++) { + psInsert.setInt(1, i); + psInsert.executeUpdate(); + conn.commit(); + } + ResultSet rs = st.executeQuery("select * from test.t1"); + int numRows = 0; + while (rs.next()) { + assertEquals("Column value should be 10", 10, rs.getInt(3)); + assertEquals("Column value should be 10", 10, rs.getInt(4)); + assertEquals("Column value should be XXXX1", "XXXX1", rs.getString(5) + .trim()); + numRows++; + } + assertEquals("Numbers of rows in resultset should be one", 1000, numRows); + + PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " + + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); + // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); + for (int i = 0; i < 1000; i++) { + psUpdate.setInt(1, i); + psUpdate.executeUpdate(); + conn.commit(); + } + + rs = st.executeQuery("select * from test.t1"); + numRows = 0; + while (rs.next()) { + assertEquals("Column value should change", 20, rs.getInt(3)); + assertEquals("Columns value should change", 20, rs.getInt(4)); + assertEquals("Columns value should change", "changed", rs.getString(5) + .trim()); + numRows++; + } + assertEquals("Numbers of rows in resultset should be one", 1000, numRows); + rs.close(); + st.close(); + conn.commit(); + conn.close(); + } + + /** + * Test transactional key based updates. 
+ */ + public void testTransactionalKeyBasedUpdates() throws Exception { + startServerVMs(2, 0, "sg1"); + startClientVMs(1, 0, null); + Connection conn = TestUtil.jdbcConn; + conn.setAutoCommit(false); + System.out.println("XXXX the type of conneciton : " + conn); + Statement st = conn.createStatement(); + st.execute("create schema test default server groups (sg1, sg2)"); + st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " + + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1) ) " + + "Partition by column (PkCol1) server groups (sg1)" + getSuffix()); + conn.commit(); + conn.setTransactionIsolation(getIsolationLevel()); + PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " + + "values(?, 10, 10, 10, 'XXXX1')"); + // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); + for (int i = 0; i < 1000; i++) { + psInsert.setInt(1, i); + psInsert.executeUpdate(); + conn.commit(); + } + // conn.commit(); + ResultSet rs = st.executeQuery("select * from test.t1"); + int numRows = 0; + while (rs.next()) { + assertEquals("Column value should be 10", 10, rs.getInt(3)); + assertEquals("Column value should be 10", 10, rs.getInt(4)); + assertEquals("Column value should be XXXX1", "XXXX1", rs.getString(5) + .trim()); + numRows++; + } + assertEquals("Numbers of rows in resultset should be one", 1000, numRows); + // conn.commit(); + PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " + + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); + // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); + for (int i = 0; i < 1000; i++) { + psUpdate.setInt(1, i); + psUpdate.executeUpdate(); + conn.commit(); + } + // conn.commit(); + rs = st.executeQuery("select * from test.t1"); + numRows = 0; + while (rs.next()) { + assertEquals("Column value should change", 20, rs.getInt(3)); + assertEquals("Columns value should change", 20, rs.getInt(4)); + assertEquals("Columns value should change", "changed", rs.getString(5) + .trim()); + numRows++; + } + assertEquals("Numbers of rows in resultset should be one", 1000, numRows); + rs.close(); + st.close(); + conn.commit(); + conn.close(); + } + + @SuppressWarnings("unused") + public static void installObservers() { + final CyclicBarrier testBarrier = new CyclicBarrier(2); + final ConcurrentHashMap waitDone = + new ConcurrentHashMap<>(2); + + TransactionObserver txOb1 = new TransactionObserverAdapter() { + boolean firstCall = true; + + @Override + public void beforeIndividualLockUpgradeInCommit(TXStateProxy tx, + TXEntryState entry) { + if (this.firstCall) { + this.firstCall = false; + return; + } + if (waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { + SanityManager.DEBUG_PRINT("info:TEST", + "TXObserver: waiting on testBarrier, count=" + + testBarrier.getNumberWaiting()); + try { + testBarrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void afterIndividualRollback(TXStateProxy tx, Object callbackArg) { + // release the barrier for the committing TX + if (waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { + try { + testBarrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + }; + + GemFireXDQueryObserver ob2 = new GemFireXDQueryObserverAdapter() { + @Override + public void lockingRowForTX(TXStateProxy tx, GemFireContainer container, + RegionEntry entry, boolean writeLock) { + if (!writeLock + && ExclusiveSharedSynchronizer.isExclusive(entry.getState()) + 
&& waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { + SanityManager.DEBUG_PRINT("info:TEST", + "GFXDObserver: waiting on testBarrier, count=" + + testBarrier.getNumberWaiting()); + try { + testBarrier.await(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } + }; + + final TXManagerImpl txMgr = GemFireCacheImpl.getExisting().getTxManager(); + for (TXStateProxy tx : txMgr.getHostedTransactionsInProgress()) { + tx.setObserver(txOb1); + } + txMgr.setObserver(txOb1); + GemFireXDQueryObserverHolder.setInstance(ob2); + } +} diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/DummyTest.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2RRDUnit.java similarity index 63% rename from gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/DummyTest.java rename to gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2RRDUnit.java index bff7b7630..0540c695f 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/DummyTest.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/internal/transactions/Transaction2RRDUnit.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved. + * Copyright (c) 2017 SnappyData, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -14,21 +14,19 @@ * permissions and limitations under the License. See accompanying * LICENSE file. */ -package com.pivotal.gemfirexd.jdbc; -/** - * @author vivekb - * Dummy Test for Debugging - Please do not remove this - * - * Don't have more than one test - */ -public class DummyTest extends JdbcTestBase { - - public DummyTest(String name) { +package com.pivotal.gemfirexd.internal.transactions; + +import java.sql.Connection; + +public class Transaction2RRDUnit extends Transaction2DUnit { + + public Transaction2RRDUnit(String name) { super(name); } - public void testDummy() { - // Dummy + @Override + protected int getIsolationLevel() { + return Connection.TRANSACTION_REPEATABLE_READ; } -} \ No newline at end of file +} diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/security/SecurityTestUtils.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/security/SecurityTestUtils.java index ffb7e4af9..48ec0b46e 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/security/SecurityTestUtils.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/security/SecurityTestUtils.java @@ -1261,7 +1261,8 @@ public static Properties startLdapServerAndGetBootProperties( server = LdapTestServer.getInstance(); } if (!server.isServerStarted()) { - server.startServer(); + int port = AvailablePortHelper.getRandomAvailableTCPPort(); + server.startServer("localhost", port); } Properties bootProps = new Properties(); setLdapServerBootProperties(server, locatorPort, mcastPort, diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionDUnit.java index 51ce7b0d2..b0d8d2d0d 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionDUnit.java @@ -22,14 +22,12 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; -import java.sql.Timestamp; import java.util.Arrays; 
import java.util.HashMap; -import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import javax.sql.XAConnection; @@ -44,7 +42,6 @@ import com.gemstone.gemfire.internal.cache.*; import com.gemstone.gemfire.internal.cache.control.InternalResourceManager; import com.gemstone.gemfire.internal.cache.control.ResourceManagerStats; -import com.gemstone.gemfire.internal.cache.locks.ExclusiveSharedSynchronizer; import com.gemstone.gnu.trove.TIntHashSet; import com.pivotal.gemfirexd.Attribute; import com.pivotal.gemfirexd.DistributedSQLTestBase; @@ -61,9 +58,9 @@ import com.pivotal.gemfirexd.internal.impl.jdbc.EmbedConnection; import com.pivotal.gemfirexd.internal.impl.jdbc.EmbedPreparedStatement; import com.pivotal.gemfirexd.internal.jdbc.EmbeddedXADataSource; -import com.pivotal.gemfirexd.internal.shared.common.sanity.SanityManager; import io.snappydata.jdbc.ClientXADataSource; import io.snappydata.test.dunit.AsyncInvocation; +import io.snappydata.test.dunit.SerializableCallable; import io.snappydata.test.dunit.SerializableRunnable; import io.snappydata.test.dunit.VM; import io.snappydata.test.dunit.standalone.AnyCyclicBarrier; @@ -85,9 +82,71 @@ public TransactionDUnit(String name) { super(name); } + private static List globalClientVMs; + private static List globalServerVMs; + + @Override + public void beforeClass() throws Exception { + super.beforeClass(); + super.baseShutDownAll(); + startVMs(1, 3); + } + + @Override + public void setUp() throws Exception { + super.commonSetUp(); + super.baseSetUp(); + if (globalClientVMs != null) { + clientVMs.clear(); + clientVMs.addAll(globalClientVMs); + serverVMs.clear(); + serverVMs.addAll(globalServerVMs); + } + resetObservers(); + invokeInEveryVM(TransactionDUnit.class, "resetObservers"); + String userName = TestUtil.currentUserName; + setupConnection(userName); + invokeInEveryVM(TransactionDUnit.class, "setupConnection", + new Object[]{userName}); + } + + public static void setupConnection(String userName) throws SQLException { + resetConnection(); + TestUtil.currentUserName = userName; + TestUtil.currentUserPassword = userName; + if (GemFireCacheImpl.getInstance() != null) { + TestUtil.setupConnection(); + } + } + + @Override + protected void baseShutDownAll() throws Exception { + TestUtil.stopNetServer(); + invokeInEveryVM(TestUtil.class, "stopNetServer"); + globalClientVMs = clientVMs; + globalServerVMs = serverVMs; + } + + @Override + public void afterClass() throws Exception { + super.baseShutDownAll(); + super.afterClass(); + } + + public static void resetObservers() { + final GemFireCacheImpl cache = GemFireCacheImpl.getInstance(); + if (cache != null) { + TXManagerImpl txMgr = cache.getCacheTransactionManager(); + for (TXStateProxy tx : txMgr.getHostedTransactionsInProgress()) { + tx.setObserver(null); + } + txMgr.setObserver(null); + GemFireXDQueryObserverHolder.clearInstance(); + } + } + // Uncomment after fixing public void DISABLED_testTransactionalInsertAsSubSelects_diffNullable() throws Exception { - startVMs(1, 2); java.sql.Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); Statement st = conn.createStatement(); @@ -116,7 +175,6 @@ public void DISABLED_testTransactionalInsertAsSubSelects_diffNullable() throws E } public void testTransactionalInsertAsSubSelects() throws Exception { - startVMs(1, 2); 
java.sql.Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -152,7 +210,6 @@ public void testTransactionalInsertAsSubSelects() throws Exception { * @throws Exception */ public void testTransactionalInsertOnReplicatedTable() throws Exception { - startVMs(1, 2); java.sql.Connection conn = TestUtil.jdbcConn; conn.setAutoCommit(false); Statement st = conn.createStatement(); @@ -261,7 +318,6 @@ public void run() { * @throws Exception */ public void testTransactionalInsertOnPartitionedTable() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setAutoCommit(false); Statement st = conn.createStatement(); @@ -292,9 +348,26 @@ public void testTransactionalInsertOnPartitionedTable() throws Exception { numRows++; } assertEquals("ResultSet should contain two rows ", 2, numRows); - VM vm = this.serverVMs.get(0); - vm.invoke(getClass(), "checkData", new Object[] { "TRAN.T1", - Long.valueOf(2) }); + final SerializableCallable getLocalSize = new SerializableCallable() { + @Override + public Object call() throws Exception { + if (GemFireCacheImpl.getInstance() != null) { + final PartitionedRegion r = (PartitionedRegion)Misc + .getRegionForTable("TRAN.T1", false); + if (r != null) { + return r.getLocalSize(); + } + } + return 0L; + } + }; + long result = 0; + for (VM vm : this.serverVMs) { + if (vm != null) { + result += (Long)vm.invoke(getLocalSize); + } + } + assertEquals(2, result); // Close connection, resultset etc... rs.close(); @@ -324,7 +397,6 @@ public static void checkData(String regionName, long numEntries) { * Test conflicts. */ public void testCommitWithConflicts() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setAutoCommit(false); Statement st = conn.createStatement(); @@ -436,7 +508,6 @@ public void run() { * on failure. */ public void testCommitOnPartitionedAndReplicatedTables() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("Create table t1 (c1 int not null , c2 int not null, " @@ -483,7 +554,6 @@ public void testCommitOnPartitionedAndReplicatedTables() throws Exception { * @throws Exception */ public void testSelectIsolated() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("Create table t1 (c1 int not null , c2 int not null, " @@ -534,9 +604,6 @@ public void testSelectIsolated() throws Exception { * @throws Exception */ public void testColocatedPrTransaction() throws Exception { - startVMs(1, 2); - // TestUtil.loadDriver(); - Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -613,7 +680,6 @@ public void testColocatedPrTransaction() throws Exception { * @throws Exception */ public void testCommitAndRollBack() throws Exception { - startVMs(1, 2); // TestUtil.loadDriver(); Properties props = new Properties(); @@ -644,10 +710,10 @@ public void testCommitAndRollBack() throws Exception { } } - TXManagerImpl.waitForPendingCommitForTest(); - // approx. 240 commits/rollbacks gets distributed across 2 nodes. adjust these numbers to a little lower value + // approx. 160 commits/rollbacks gets distributed across 3 nodes. + // adjust these numbers to a little lower value // if unbalanced commits/rollbacks happen. 
- checkTxStatistics("commit-afterInserts", 240, 240, 240, 240, 500, 501); + checkTxStatistics("commit-afterInserts", 160, 160, 160, 160, 500, 501); ResultSet rs = st.executeQuery("select * from t1"); int numRows = 0; @@ -661,9 +727,9 @@ public void testCommitAndRollBack() throws Exception { conn.commit(); conn.close(); TXManagerImpl.waitForPendingCommitForTest(); - checkTxStatistics("commit-afterSelects", 240, 240, 240, 240, 500, 501); + checkTxStatistics("commit-afterSelects", 160, 160, 160, 160, 500, 501); } - + private void checkTxStatistics(final String comment, final int rCommits1, final int rRollback1, final int rCommits2, final int rRollback2, final int lCommit, final int lRollback) throws Exception { @@ -732,7 +798,7 @@ public void run() throws CacheException { assertTrue("txCommit=" + txCommit + "," + "lCommits=" + lCommit, txCommit >= lCommit ); } else if (isRemoteCheck[0] == 1) { - assertTrue("server " + isRemoteCheck[0] + "txCommit=" + txCommit + "," + "rCommits=" + rCommits1, txCommit >= rCommits1 ); + assertTrue("server " + isRemoteCheck[0] + " txCommit=" + txCommit + "," + "rCommits=" + rCommits1, txCommit >= rCommits1 ); } else if (isRemoteCheck[0] == 2) { assertTrue("server " + isRemoteCheck[0] + " txCommit=" + txCommit + "," + "rCommits=" + rCommits2, txCommit >= rCommits2 ); @@ -754,9 +820,6 @@ else if (isRemoteCheck[0] == 2) { * @throws Exception */ public void testNonTransactionalCommitAndRollback() throws Exception { - startVMs(1, 2); - // TestUtil.loadDriver(); - Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(Connection.TRANSACTION_NONE); conn.setAutoCommit(false); @@ -795,157 +858,6 @@ public void testNonTransactionalCommitAndRollback() throws Exception { conn.close(); } - public void testNonColocatedInsertByPartitioning() throws Exception { - startServerVMs(1, 0, "sg1"); - startServerVMs(1, 0, "sg2"); - startClientVMs(1, 0, null); - // TestUtil.loadDriver(); - - Connection conn = TestUtil.jdbcConn; - System.out.println("XXXX the type of conneciton : " + conn); - Statement st = conn.createStatement(); - st.execute("create schema test default server groups (sg1, sg2)"); - st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " - + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1, PkCol2) ) " - + "Partition by column (PkCol1) server groups (sg1)"+ getSuffix()); - - st.execute("create table test.t2 (PkCol1 int not null, PkCol2 int not null, " - + " col3 int, col4 varchar(10)) Partition by column (PkCol1)" - + " server groups (sg2)"+ getSuffix()); - conn.commit(); - // conn.setTransactionIsolation(getIsolationLevel()); - st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); - st.execute("insert into test.t2 values(10, 10, 10, 'XXXX1')"); - conn.commit(); - - } - - /** - * Test transactional key based updates. 
- * - * @throws Exception - */ - public void testTransactionalKeyBasedUpdates() throws Exception { - startServerVMs(2, 0, "sg1"); - startClientVMs(1, 0, null); - Connection conn = TestUtil.jdbcConn; - conn.setAutoCommit(false); - System.out.println("XXXX the type of conneciton : " + conn); - Statement st = conn.createStatement(); - st.execute("create schema test default server groups (sg1, sg2)"); - st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " - + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1) ) " - + "Partition by column (PkCol1) server groups (sg1)"+ getSuffix()); - conn.commit(); - conn.setTransactionIsolation(getIsolationLevel()); - PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " - + "values(?, 10, 10, 10, 'XXXX1')"); - // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); - for (int i = 0; i < 1000; i++) { - psInsert.setInt(1, i); - psInsert.executeUpdate(); - conn.commit(); - } - // conn.commit(); - ResultSet rs = st.executeQuery("select * from test.t1"); - int numRows = 0; - while (rs.next()) { - assertEquals("Column value should be 10", 10, rs.getInt(3)); - assertEquals("Column value should be 10", 10, rs.getInt(4)); - assertEquals("Column value should be XXXX1", "XXXX1", rs.getString(5) - .trim()); - numRows++; - } - assertEquals("Numbers of rows in resultset should be one", 1000, numRows); - // conn.commit(); - PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " - + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); - // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); - for (int i = 0; i < 1000; i++) { - psUpdate.setInt(1, i); - psUpdate.executeUpdate(); - conn.commit(); - } - // conn.commit(); - rs = st.executeQuery("select * from test.t1"); - numRows = 0; - while (rs.next()) { - assertEquals("Column value should change", 20, rs.getInt(3)); - assertEquals("Columns value should change", 20, rs.getInt(4)); - assertEquals("Columns value should change", "changed", rs.getString(5) - .trim()); - numRows++; - } - assertEquals("Numbers of rows in resultset should be one", 1000, numRows); - rs.close(); - st.close(); - conn.commit(); - conn.close(); - - } - - /** - * Test updates on tables partitioned by PK. 
- * - * @throws Exception - */ - public void testTransactionalKeyBasedUpdatePartitionedByPk() throws Exception { - startServerVMs(2, 0, "sg1"); - startClientVMs(1, 0, null); - Connection conn = TestUtil.jdbcConn; - conn.setAutoCommit(false); - Statement st = conn.createStatement(); - st.execute("create schema test default server groups (sg1, sg2)"); - st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null , " - + "col3 int, col4 int, col5 varchar(10), Primary Key (PkCol1) ) " - + "Partition by Primary Key server groups (sg1) redundancy 1"+ getSuffix()); - - conn.setTransactionIsolation(getIsolationLevel()); - PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " - + "values(?, 10, 10, 10, 'XXXX1')"); - // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); - for (int i = 0; i < 1000; i++) { - psInsert.setInt(1, i); - psInsert.executeUpdate(); - conn.commit(); - } - ResultSet rs = st.executeQuery("select * from test.t1"); - int numRows = 0; - while (rs.next()) { - assertEquals("Column value should be 10", 10, rs.getInt(3)); - assertEquals("Column value should be 10", 10, rs.getInt(4)); - assertEquals("Column value should be XXXX1", "XXXX1", rs.getString(5) - .trim()); - numRows++; - } - assertEquals("Numbers of rows in resultset should be one", 1000, numRows); - - PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " - + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); - // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); - for (int i = 0; i < 1000; i++) { - psUpdate.setInt(1, i); - psUpdate.executeUpdate(); - conn.commit(); - } - - rs = st.executeQuery("select * from test.t1"); - numRows = 0; - while (rs.next()) { - assertEquals("Column value should change", 20, rs.getInt(3)); - assertEquals("Columns value should change", 20, rs.getInt(4)); - assertEquals("Columns value should change", "changed", rs.getString(5) - .trim()); - numRows++; - } - assertEquals("Numbers of rows in resultset should be one", 1000, numRows); - rs.close(); - st.close(); - conn.commit(); - conn.close(); - - } - /** * DDL followed imediately by commit and then DML. 
* @@ -954,8 +866,6 @@ public void testTransactionalKeyBasedUpdatePartitionedByPk() throws Exception { public void testNetworkTransactionDDLFollowedByCommitThenDML() throws Exception { - // start three network servers - startServerVMs(1, 0, null); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=q;password=q", null); @@ -992,8 +902,6 @@ public void testNetworkTransactionDDLFollowedByCommitThenDML() * @throws Exception */ public void testNetworkDDLFollowedByInsert() throws Exception { - // start three network servers - startServerVMs(1, 0, null); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=q;password=q", null); @@ -1049,7 +957,6 @@ public void testNetworkDDLFollowedByInsert() throws Exception { } public void testTransactionalUpdates() throws Exception { - startServerVMs(1, 0, null); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=q;password=q", null); @@ -1121,66 +1028,7 @@ public void testTransactionalUpdates() throws Exception { conn.close(); } - public void testIndexMaintenanceOnPrimaryAndSecondary() throws Exception { - startServerVMs(2, 0, "sg1"); - startClientVMs(1, 0, null); - Properties props = new Properties(); - props.setProperty(Attribute.TX_SYNC_COMMITS, "true"); - final Connection conn = TestUtil.getConnection(props); - Statement st = conn.createStatement(); - st.execute("create schema test default server groups (sg1, sg2)"); - st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int " - + "not null , col3 int, col4 int, col5 varchar(10), Primary Key(PkCol1)" - + ") Partition by Primary Key server groups (sg1) redundancy 1"+ getSuffix()); - conn.commit(); - st.execute("create index IndexCol4 on test.t1 (col4)"); - conn.commit(); - - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - final int numRows = 10; - VM server1 = this.serverVMs.get(0); - VM server2 = this.serverVMs.get(1); - server1.invoke(getClass(), "installIndexObserver", - new Object[] { "test.IndexCol4", null }); - server2.invoke(getClass(), "installIndexObserver", - new Object[] { "test.IndexCol4", null }); - PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " - + "values(?, 10, 10, 10, 'XXXX1')"); - for (int i = 0; i < numRows; i++) { - psInsert.setInt(1, i); - psInsert.executeUpdate(); - conn.commit(); - } - - server1.invoke(getClass(), "checkIndexAndReset", - new Object[] { Integer.valueOf(numRows), Integer.valueOf(0) }); - server2.invoke(getClass(), "checkIndexAndReset", - new Object[] { Integer.valueOf(numRows), Integer.valueOf(0) }); - - PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " - + "col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=?"); - for (int i = 0; i < numRows; i++) { - psUpdate.setInt(1, i); - psUpdate.executeUpdate(); - conn.commit(); - } - - server1.invoke(getClass(), "checkIndexAndReset", new Object[] { - Integer.valueOf(numRows * 2), Integer.valueOf(numRows) }); - server2.invoke(getClass(), "checkIndexAndReset", new Object[] { - Integer.valueOf(numRows * 2), Integer.valueOf(numRows) }); - - server1.invoke(getClass(), "resetIndexObserver"); - server2.invoke(getClass(), "resetIndexObserver"); - - st.close(); - conn.close(); - } - public void testXATransactionFromClient_commit() throws Exception { - startServerVMs(2, 0, null); - startClientVMs(1, 0, null); final int netport = 
startNetworkServer(1, null, null); serverSQLExecute(1, "create schema test"); serverSQLExecute(1, "create table test.XATT2 (intcol int not null, text varchar(100) not null)"+ getSuffix()); @@ -1236,9 +1084,6 @@ public void testXATransactionFromClient_commit() throws Exception { } public void testXATransactionFromPeerClient_commit() throws Exception { - startServerVMs(2, 0, null); - startClientVMs(1, 0, null); - serverSQLExecute(1, "create schema test"); serverSQLExecute(1, "create table test.XATT2 (intcol int not null, text varchar(100) not null)"+ getSuffix()); serverSQLExecute(1, "insert into test.XATT2 values (1, 'ONE')"); @@ -1288,8 +1133,6 @@ public void testXATransactionFromPeerClient_commit() throws Exception { } public void testXATransactionFromClient_rollback() throws Exception { - startServerVMs(2, 0, null); - startClientVMs(1, 0, null); final int netport = startNetworkServer(1, null, null); serverSQLExecute(1, "create schema test"); serverSQLExecute(1, "create table test.XATT2 (intcol int not null, text varchar(100) not null)"+ getSuffix()); @@ -1345,9 +1188,6 @@ public void testXATransactionFromClient_rollback() throws Exception { } public void testXATransactionFromPeerClient_rollback() throws Exception { - startServerVMs(2, 0, null); - startClientVMs(1, 0, null); - serverSQLExecute(1, "create schema test"); serverSQLExecute(1, "create table test.XATT2 (intcol int not null, text varchar(100) not null)"+ getSuffix()); serverSQLExecute(1, "insert into test.XATT2 values (1, 'ONE')"); @@ -1404,6 +1244,7 @@ public static void waitForPendingCommit() { * Install an observer called during index maintenance. */ public static void installIndexObserver(String name, TXId txId) { + if (GemFireCacheImpl.getInstance() == null) return; CheckIndexOperations checkIndex = new CheckIndexOperations(name, txId); GemFireXDQueryObserver old = GemFireXDQueryObserverHolder .setInstance(checkIndex); @@ -1422,11 +1263,44 @@ public static void checkIndexAndReset(int numInsertExpected, checkIndex.checkNumDeletes(numDeletesExpected); } + /** + * Check index operations and reset the holder. + */ + public static void checkIndexAndResetAll(int numInsertExpected, + int numDeletesExpected) throws Exception { + + TXManagerImpl.waitForPendingCommitForTest(); + long numInserted = 0; + long numDeleted = 0; + Map results = invokeInEveryVM(new SerializableCallable() { + @Override + public Object call() throws Exception { + CheckIndexOperations checkIndex; + if (GemFireCacheImpl.getInstance() != null && + (checkIndex = GemFireXDQueryObserverHolder.getObserver( + CheckIndexOperations.class)) != null) { + return ((long)checkIndex.numInserts << 32L) | (long)checkIndex.numDeletes; + } else { + return 0L; + } + } + }); + for (Object o : results.values()) { + if (o != null) { + long v = (Long)o; + numInserted += (v >> 32L); + numDeleted += (v & 0xffffffffL); + } + } + assertEquals(numInsertExpected, numInserted); + assertEquals(numDeletesExpected, numDeleted); + } + /** * Reset index observer. 
*/ public static void resetIndexObserver() throws Exception { - + if (GemFireCacheImpl.getInstance() == null) return; GemFireXDQueryObserverHolder.clearInstance(); } @@ -1492,87 +1366,11 @@ private boolean checkTX() { } } - public void testBug41694() throws Exception { - startServerVMs(2, 0, "sg1"); - startClientVMs(1, 0, null); - Connection conn = TestUtil.jdbcConn; - Statement st = conn.createStatement(); - st.execute("create schema test default server groups (sg1, sg2)"); - st.execute("create table test.t1 ( PkCol1 int not null, PkCol2 int not null, " - + "col3 int, col4 int, col5 varchar(10),col6 int, col7 int, col8 int, " - + "col9 int, col10 int, col11 int, col12 int, col13 int, col14 int, " - + "col15 int, col16 int, col17 int, col18 int, col19 int, col20 int, " - + "col21 int,col22 int, col23 int, col24 int, col25 int, col26 int, " - + "col27 int, col28 int, col29 int, col30 int, col31 int, col32 int," - + " col33 int, col34 int, col35 int, col36 int, col37 int, col38 int, " - + "col39 int, col40 int, col41 int, col42 int, col43 int, col44 int, " - + "col45 int, col46 int, col47 int, col48 int, col49 int, col50 int, " - + "col51 int, col52 int, col53 int, col54 int, col55 int, col56 int, " - + "col57 int, col58 int, col59 int, col60 int, col61 int, col62 int, " - + "col63 int, col64 int, col65 int, col66 int, col67 int, col68 int, " - + "col69 int, col70 int, col71 int, col72 int, col73 int, col74 int, " - + "col75 int, col76 int, col77 int, col78 int, col79 int, col80 int, " - + "col81 int, col82 int, col83 int, col84 int, col85 int, col86 int, " - + "col87 int, col88 int, col89 int, col90 int, col91 int, col92 int, " - + "col93 int, col94 int, col95 int, col96 int, col97 int, col98 int, " - + "col99 int, col100 int, Primary Key (PkCol1) ) " - + "Partition by Primary Key server groups (sg1) redundancy 1"+ getSuffix()); - conn.commit(); - st.execute("create index IndexCol4 on test.t1 (col4)"); - conn.commit(); - - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - final int numRows = 1; - PreparedStatement psInsert = conn.prepareStatement("insert into test.t1 " - + "values(?, 1000, 1000, 1000, 'XXXX1'" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000 " - + " , 1000, 1000, 1000, 1000, 1000 " - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000" - + " , 1000, 1000, 1000, 1000, 1000 )"); - // st.execute("insert into test.t1 values(10, 10, 10, 10, 'XXXX1')"); - for (int i = 0; i < numRows; i++) { - psInsert.setInt(1, i); - psInsert.executeUpdate(); - conn.commit(); - } - - PreparedStatement psUpdate = conn.prepareStatement("update test.t1 set " - + "col3 = 20 where PkCol1=?"); - // st.execute("update test.t1 set col3 = 20, col4 = 20, col5 = 'changed' where PkCol1=10"); - for (int i = 0; i < 1000; i++) { - // Update the same row over and over should not cause #41694, - // negative bucket size(memory consumed by bucket). 
- psUpdate.setInt(1, 0); - psUpdate.executeUpdate(); - conn.commit(); - } - - st.close(); - conn.commit(); - } - /** Simple test case of timing inserts. */ public void testUseCase_timeInserts() throws Exception { // reduce logs reduceLogLevelForTest("warning"); - startVMs(0, 1); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=app;password=app", null); @@ -1619,7 +1417,6 @@ protected void dumpIntoTable(Connection conn, int rows) throws SQLException { * @throws Exception */ public void testTransactionalKeyBasedDeletes() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema tran"); @@ -1649,7 +1446,6 @@ public void testTransactionalKeyBasedDeletes() throws Exception { * @throws Exception */ public void testTransactionalDeleteWithLocalIndexes() throws Exception { - startVMs(1, 1); Properties props = new Properties(); props.setProperty(Attribute.TX_SYNC_COMMITS, "true"); final Connection conn = TestUtil.getConnection(props); @@ -1674,15 +1470,13 @@ public void testTransactionalDeleteWithLocalIndexes() throws Exception { + "where c1 = ?"); conn.commit(); - // GemFireXDQueryObserver old = null; - VM server1 = this.serverVMs.get(0); // below check also forces a new TX to start for getCurrentTXId() call ResultSet rs = conn.createStatement().executeQuery( "select count(*) from tran.t1"); assertTrue(rs.next()); assertEquals(numRows, rs.getInt(1)); assertFalse(rs.next()); - server1.invoke(getClass(), "installIndexObserver", + invokeInEveryVM(getClass(), "installIndexObserver", new Object[] { "tran.IndexCol2", TXManagerImpl.getCurrentTXId() }); try { // old = GemFireXDQueryObserverHolder.setInstance(checkIndex); @@ -1699,9 +1493,8 @@ public void testTransactionalDeleteWithLocalIndexes() throws Exception { // GemFireXDQueryObserverHolder.clearInstance(); // } } - server1.invoke(getClass(), "checkIndexAndReset", - new Object[] { Integer.valueOf(0), Integer.valueOf(numRows) }); - server1.invoke(getClass(), "resetIndexObserver"); + checkIndexAndResetAll(0, numRows); + invokeInEveryVM(getClass(), "resetIndexObserver"); st.close(); conn.close(); @@ -1714,7 +1507,6 @@ public void testTransactionalDeleteWithLocalIndexes() throws Exception { */ public void testTransactionalDeleteWithLocalIndexesClientServer() throws Exception { - // startServerVMs(1); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=app;password=app", null); @@ -1738,8 +1530,7 @@ public void testTransactionalDeleteWithLocalIndexesClientServer() } // GemFireXDQueryObserver old = null; - VM server1 = this.serverVMs.get(0); - server1.invoke(getClass(), "installIndexObserver", + invokeInEveryVM(getClass(), "installIndexObserver", new Object[] { "tran.IndexCol2", null }); PreparedStatement psDelete = conn.prepareStatement("delete from tran.t1 " + "where c1 = ?"); @@ -1750,9 +1541,9 @@ public void testTransactionalDeleteWithLocalIndexesClientServer() conn.commit(); } - server1.invoke(getClass(), "checkIndexAndReset", - new Object[] { Integer.valueOf(0), Integer.valueOf(numRows) }); - server1.invoke(getClass(), "resetIndexObserver"); + checkIndexAndResetAll(0, numRows); + invokeInEveryVM(getClass(), "resetIndexObserver"); + st.execute("drop table tran.t1"); st.execute("drop schema tran restrict"); @@ -1769,7 +1560,6 @@ public void testTransactionalDeleteWithLocalIndexesClientServer() */ public void 
testTransactionalDeleteWithLocalIndexesClientServerReplicatedTable() throws Exception { - // startServerVMs(1); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=app;password=app", null); @@ -1783,8 +1573,7 @@ public void testTransactionalDeleteWithLocalIndexesClientServerReplicatedTable() conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); - VM server1 = this.serverVMs.get(0); - server1.invoke(getClass(), "installIndexObserver", + invokeInEveryVM(getClass(), "installIndexObserver", new Object[] { "tran.IndexCol2", null }); int numRows = 1000; @@ -1797,7 +1586,12 @@ public void testTransactionalDeleteWithLocalIndexesClientServerReplicatedTable() conn.commit(); } - server1.invoke(getClass(), "checkIndexAndReset", + TXManagerImpl.waitForPendingCommitForTest(); + getServerVM(1).invoke(getClass(), "checkIndexAndReset", + new Object[] { Integer.valueOf(numRows), Integer.valueOf(0) }); + getServerVM(2).invoke(getClass(), "checkIndexAndReset", + new Object[] { Integer.valueOf(numRows), Integer.valueOf(0) }); + getServerVM(3).invoke(getClass(), "checkIndexAndReset", new Object[] { Integer.valueOf(numRows), Integer.valueOf(0) }); // final CheckIndexOperations checkIndex = new @@ -1806,20 +1600,22 @@ public void testTransactionalDeleteWithLocalIndexesClientServerReplicatedTable() PreparedStatement psDelete = conn.prepareStatement("delete from tran.t1 " + "where c1 = ?"); - try { - // old = GemFireXDQueryObserverHolder.setInstance(checkIndex); - for (int i = 0; i < numRows; i++) { - psDelete.setInt(1, i); - psDelete.executeUpdate(); - conn.commit(); - } - } finally { + // old = GemFireXDQueryObserverHolder.setInstance(checkIndex); + for (int i = 0; i < numRows; i++) { + psDelete.setInt(1, i); + psDelete.executeUpdate(); + conn.commit(); } - server1.invoke(getClass(), "checkIndexAndReset", + TXManagerImpl.waitForPendingCommitForTest(); + getServerVM(1).invoke(getClass(), "checkIndexAndReset", new Object[] { Integer.valueOf(numRows), Integer.valueOf(numRows) }); - server1.invoke(getClass(), "resetIndexObserver"); + getServerVM(2).invoke(getClass(), "checkIndexAndReset", + new Object[] { Integer.valueOf(numRows), Integer.valueOf(numRows) }); + getServerVM(3).invoke(getClass(), "checkIndexAndReset", + new Object[] { Integer.valueOf(numRows), Integer.valueOf(numRows) }); + invokeInEveryVM(getClass(), "resetIndexObserver"); st.execute("drop table tran.t1"); st.execute("drop schema tran restrict"); @@ -1835,7 +1631,6 @@ public void testTransactionalDeleteWithLocalIndexesClientServerReplicatedTable() * @throws Exception */ public void testUpdateWithFunctionExecution() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -1910,8 +1705,6 @@ public void testUpdateWithFunctionExecution() throws Exception { * @throws Exception */ public void testNetworkFailedDDLFollowedByInsert() throws Exception { - // start three network servers - startServerVMs(1, 0, null); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, "/;user=q;password=q", null); @@ -1946,7 +1739,6 @@ public void testNetworkFailedDDLFollowedByInsert() throws Exception { } public void test41679() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -2003,7 +1795,6 @@ public void test41679() throws 
Exception { * Unique constraint violation on remote node. */ public void testUniquenessFailure() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema tran"); @@ -2071,7 +1862,6 @@ public void testUniquenessFailure() throws Exception { * @throws Exception */ public void testUniquenessFailureReplicatedTable() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema tran"); @@ -2112,7 +1902,6 @@ public void testUniquenessFailureReplicatedTable() throws Exception { } public void testNonKeyBasedTransactionalUpdates() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -2180,7 +1969,6 @@ public void testNonKeyBasedTransactionalUpdates() throws Exception { public void testNonKeyBasedTransactionalUpdatesRollbackAndCommit() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -2269,7 +2057,6 @@ public void testNonKeyBasedTransactionalUpdatesRollbackAndCommit() } public void testNonKeyBasedTransactionalUpdatesAndConflict() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -2588,7 +2375,6 @@ public void testBulkTransactionalUpdatesRollbackAndCommitClientServerReplicateTa public void testNonKeyBasedTransactionalUpdatesRollbackAndCommitReplicateTable() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema trade"); @@ -2675,120 +2461,7 @@ public void testNonKeyBasedTransactionalUpdatesRollbackAndCommitReplicateTable() conn.close(); } - public void testBug41970_43473() throws Throwable { - startVMs(1, 1); - Connection conn = TestUtil.jdbcConn; - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - Statement st = conn.createStatement(); - st.execute("create table customers (cid int not null, cust_name " - + "varchar(100), addr varchar(100), tid int, primary key (cid))"); - st.execute("create table trades (tid int, cid int, eid int, primary Key " - + "(tid), foreign key (cid) references customers (cid))"+ getSuffix()); - PreparedStatement pstmt = conn - .prepareStatement("insert into customers values(?,?,?,?)"); - pstmt.setInt(1, 1); - pstmt.setString(2, "name1"); - pstmt.setString(3, "add1"); - pstmt.setInt(4, 1); - pstmt.executeUpdate(); - pstmt.setInt(1, 2); - pstmt.setString(2, "name2"); - pstmt.setString(3, "add2"); - pstmt.setInt(4, 1); - pstmt.executeUpdate(); - conn.commit(); - - ResultSet rs = st.executeQuery("Select * from customers"); - int numRows = 0; - while (rs.next()) { - // Checking number of rows returned, since ordering of results - // is not guaranteed. 
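// (Rows from a partitioned table come back in no fixed order without an
//  ORDER BY, so only the total row count is asserted here.)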
- numRows++; - } - assertEquals("ResultSet should contain two rows ", 2, numRows); - rs.close(); - conn.commit(); - - // test for #43473 - st.execute("create table sellorders (oid int not null primary key, " - + "cid int, order_time timestamp, status varchar(10), " - + "constraint ch check (status in ('cancelled', 'open', 'filled')))"+ getSuffix()); - pstmt = conn.prepareStatement("insert into sellorders values (?, ?, ?, ?)"); - final long currentTime = System.currentTimeMillis(); - final Timestamp ts = new Timestamp(currentTime - 100); - final Timestamp now = new Timestamp(currentTime); - for (int id = 1; id <= 100; id++) { - pstmt.setInt(1, id); - pstmt.setInt(2, id * 2); - pstmt.setTimestamp(3, ts); - pstmt.setString(4, "open"); - pstmt.execute(); - } - conn.commit(); - - final CyclicBarrier barrier = new CyclicBarrier(2); - final Throwable[] failure = new Throwable[1]; - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Connection conn2 = TestUtil.getConnection(); - conn2.setTransactionIsolation(getIsolationLevel()); - conn2.setAutoCommit(false); - PreparedStatement pstmt2 = conn2 - .prepareStatement("update sellorders set cid = ? where oid = ?"); - pstmt2.setInt(1, 7); - pstmt2.setInt(2, 3); - assertEquals(1, pstmt2.executeUpdate()); - pstmt2.setInt(1, 3); - pstmt2.setInt(2, 1); - assertEquals(1, pstmt2.executeUpdate()); - - // use a barrier to force txn1 to wait after first EX lock upgrade - // and txn2 to wait before EX_SH lock acquisition - getServerVM(1).invoke(TransactionDUnit.class, "installObservers"); - barrier.await(); - conn2.commit(); - } catch (Throwable t) { - failure[0] = t; - } - } - }); - t.start(); - - pstmt = conn.prepareStatement("update sellorders " - + "set status = 'cancelled' where order_time < ? 
and status = 'open'"); - pstmt.setTimestamp(1, now); - barrier.await(); - try { - pstmt.executeUpdate(); - fail("expected conflict exception"); - } catch (SQLException sqle) { - if (!"X0Z02".equals(sqle.getSQLState())) { - throw sqle; - } - } - conn.close(); - - t.join(); - - if (failure[0] != null) { - throw failure[0]; - } - - // clear the observers - serverExecute(1, new SerializableRunnable() { - @Override - public void run() { - GemFireCacheImpl.getExisting().getTxManager().setObserver(null); - GemFireXDQueryObserverHolder.clearInstance(); - } - }); - } - public void testBug41976() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -2849,7 +2522,6 @@ public void testBug41976() throws Exception { } public void testBug41956() throws Exception { - startVMs(2, 2); Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -2923,7 +2595,6 @@ public void run() { } public void testBug41974() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -2956,9 +2627,6 @@ public void testBug41974() throws Exception { public void testBug42014() throws Exception { // Create a table from client using partition by column - // Start one client and three servers - startVMs(1, 3); - clientSQLExecute(1, "create table trade.portfolio (cid int not null, " + "sid int not null, qty int not null, availQty int not null, " + "subTotal decimal(30,20), tid int) " @@ -3002,179 +2670,7 @@ public void testBug42014() throws Exception { conn.commit(); } - public void testBug42031IsolationAndTXData() throws Exception { - // Create the controller VM as client which belongs to default server group - startClientVMs(1, 0, null); - startServerVMs(1, -1, "SG1"); - // create table - clientSQLExecute(1, "create table TESTTABLE (ID int not null primary key, " - + "DESCRIPTION varchar(1024), ADDRESS varchar(1024), ID1 int)"+ getSuffix()); - - Connection conn = TestUtil.jdbcConn; - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - Statement stmt = conn.createStatement(); - // Do an insert in sql fabric. 
This will create a primary bucket on the lone - // server VM - // with bucket ID =1 - stmt.executeUpdate("Insert into TESTTABLE values(114,'desc114','Add114',114)"); - - stmt.executeUpdate("Insert into TESTTABLE values(1,'desc1','Add1',1)"); - stmt.executeUpdate("Insert into TESTTABLE values(227,'desc227','Add227',227)"); - stmt.executeUpdate("Insert into TESTTABLE values(340,'desc340','Add340',340)"); - conn.rollback(); - stmt.executeUpdate("Insert into TESTTABLE values(114,'desc114','Add114',114)"); - stmt.executeUpdate("Insert into TESTTABLE values(2,'desc1','Add1',1)"); - stmt.executeUpdate("Insert into TESTTABLE values(224,'desc227','Add227',227)"); - stmt.executeUpdate("Insert into TESTTABLE values(331,'desc340','Add340',340)"); - conn.commit(); - // Bulk Update - stmt.executeUpdate("update TESTTABLE set ID1 = ID1 +1 "); - ResultSet rs = stmt.executeQuery("select ID1 from TESTTABLE"); - Set expected = new HashSet(); - expected.add(Integer.valueOf(1)); - expected.add(Integer.valueOf(227)); - expected.add(Integer.valueOf(340)); - expected.add(Integer.valueOf(114)); - Set expected2 = new HashSet(); - expected2.add(Integer.valueOf(2)); - expected2.add(Integer.valueOf(228)); - expected2.add(Integer.valueOf(341)); - expected2.add(Integer.valueOf(115)); - - int numRows = 0; - while (rs.next()) { - Integer got = Integer.valueOf(rs.getInt(1)); - assertTrue(expected2.contains(got)); - ++numRows; - } - assertEquals(expected2.size(), numRows); - - // rollback and check original values - conn.rollback(); - - rs = stmt.executeQuery("select ID1 from TESTTABLE"); - numRows = 0; - while (rs.next()) { - Integer got = Integer.valueOf(rs.getInt(1)); - assertTrue(expected.contains(got)); - ++numRows; - } - assertEquals(expected.size(), numRows); - - // now commit and check success - stmt.executeUpdate("update TESTTABLE set ID1 = ID1 +1 "); - rs = stmt.executeQuery("select ID1 from TESTTABLE"); - numRows = 0; - while (rs.next()) { - Integer got = Integer.valueOf(rs.getInt(1)); - assertTrue(expected2.contains(got)); - ++numRows; - } - assertEquals(expected2.size(), numRows); - - conn.commit(); - - rs = stmt.executeQuery("select ID1 from TESTTABLE"); - numRows = 0; - while (rs.next()) { - Integer got = Integer.valueOf(rs.getInt(1)); - assertTrue(expected2.contains(got)); - ++numRows; - } - assertEquals(expected2.size(), numRows); - } - - public void testBug41873_1() throws Exception { - - // Create the controller VM as client which belongs to default server group - startClientVMs(1, 0, null); - startServerVMs(2, -1, "SG1"); - Connection conn = TestUtil.jdbcConn; - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - // create table - clientSQLExecute(1, "Create table t1 (c1 int not null , c2 int not null, " - + "c3 int not null, c4 int not null) redundancy 1 " - + "partition by column (c1) "+ getSuffix()); - conn.commit(); - Statement st = conn.createStatement(); - st.execute("insert into t1 values (1, 1,1,1)"); - st.execute("insert into t1 values (114, 114,114,114)"); - conn.commit(); - st.execute("update t1 set c2 =2 where c1 =1"); - st.execute("update t1 set c3 =3 where c1 =1"); - st.execute("update t1 set c4 =4 where c1 =1"); - st.execute("update t1 set c2 =3 where c1 = 114"); - st.execute("update t1 set c3 =4 where c1 =114"); - st.execute("update t1 set c4 =5 where c1 =114"); - conn.commit(); - ResultSet rs = st.executeQuery("Select * from t1 where c1 = 1"); - rs.next(); - assertEquals(1, rs.getInt(1)); - assertEquals(2, rs.getInt(2)); - assertEquals(3, rs.getInt(3)); - 
assertEquals(4, rs.getInt(4)); - - rs = st.executeQuery("Select * from t1 where c1 = 114"); - rs.next(); - assertEquals(114, rs.getInt(1)); - assertEquals(3, rs.getInt(2)); - assertEquals(4, rs.getInt(3)); - assertEquals(5, rs.getInt(4)); - conn.commit(); - } - - public void testBug42067_1() throws Exception { - - // Create the controller VM as client which belongs to default server group - startClientVMs(1, 0, null); - startServerVMs(2, -1, "SG1"); - Connection conn = TestUtil.jdbcConn; - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - // create table - clientSQLExecute(1, "Create table t1 (c1 int not null, " - + "c2 int not null, c3 int not null, c4 int not null) " - + "redundancy 1 partition by column (c1) "+ getSuffix()); - conn.commit(); - Statement st = conn.createStatement(); - st.execute("insert into t1 values (1, 1,1,1)"); - st.execute("insert into t1 values (114, 114,114,114)"); - conn.commit(); - st.execute("delete from t1 where c1 =1 and c3 =1"); - st.execute("update t1 set c2 =2 where c1 =1 and c3 =1"); - conn.commit(); - } - - public void testBug42067_2() throws Exception { - - // Create the controller VM as client which belongs to default server group - startClientVMs(1, 0, null); - startServerVMs(2, -1, "SG1"); - Connection conn = TestUtil.jdbcConn; - conn.setTransactionIsolation(getIsolationLevel()); - conn.setAutoCommit(false); - // create table - clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " - + "c2 int not null, c3 int not null, c4 int not null) " - + "redundancy 1 partition by column (c1) "+ getSuffix()); - conn.commit(); - Statement st = conn.createStatement(); - st.execute("insert into t1 values (1, 1,1,1)"); - st.execute("insert into t1 values (114, 114,114,114)"); - conn.commit(); - st.execute("delete from t1 where c1 =1 and c3 =1"); - st.execute("update t1 set c2 =2 where c1 =1 and c3 =1"); - conn.commit(); - ResultSet rs = st.executeQuery("select * from t1"); - assertTrue(rs.next()); - assertEquals(114, rs.getInt(1)); - assertFalse(rs.next()); - } - public void testBug42311_1() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -3191,7 +2687,6 @@ public void testBug42311_1() throws Exception { } public void testBug42311_2() throws Exception { - startVMs(1, 1); Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -3216,7 +2711,6 @@ public void testBug42311_2() throws Exception { public void testBugPutAllDataLossAsBuggyDistribution() throws Exception { reduceLogLevelForTest("config"); - startServerVMs(2, 0, null); final int netPort = startNetworkServer(1, null, null); final Connection conn = TestUtil.getNetConnection(netPort, null, null); @@ -3280,7 +2774,6 @@ public static void verifyNumEntries(int expectedNum, int bucketid, } public void testMultipleInsertFromThinClient_bug44242() throws Exception { - startServerVMs(2, 0, null); int port = startNetworkServer(1, null, null); Connection netConn = TestUtil.getNetConnection(port, null, null); netConn.createStatement().execute("create schema emp"); @@ -3343,7 +2836,6 @@ public void testMultipleInsertFromThinClient_bug44242() throws Exception { } // FK related tests public void testFK_NoGlobalIndex_differentThread() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " + "c2 int not null, c3 int not null)"+ 
getSuffix()); @@ -3419,7 +2911,6 @@ public void run() { } public void testFK_NoGlobalIndexSameThread() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " + "c2 int not null, c3 int not null)"+ getSuffix()); @@ -3474,7 +2965,6 @@ public void testFK_NoGlobalIndexSameThread() throws Exception { } public void testFK_GlobalIndexDifferentThread() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " + "c2 int not null, c3 int not null) partition by column(c2)"+ getSuffix()); @@ -3550,7 +3040,6 @@ public void run() { } public void testFK_GlobalIndexSameThread() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; clientSQLExecute(1, "Create table t1 (c1 int not null primary key, " + "c2 int not null, c3 int not null) partition by column(c2)"+ getSuffix()); @@ -3605,8 +3094,6 @@ public void testFK_GlobalIndexSameThread() throws Exception { } public void testFKWithBatching_49371() throws Throwable { - startVMs(1, 2); - Connection conn = TestUtil.jdbcConn; Statement stmt = conn.createStatement(); @@ -3855,8 +3342,6 @@ public void run() { * or updates) in another TX. */ public void test42822() throws Exception { - startVMs(1, 3); - Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -3923,8 +3408,6 @@ public void run() { /** Test for "sync-commits" property. */ public void testSyncCommits() throws Throwable { - startVMs(1, 3); - final int netPort = startNetworkServer(2, null, null); Properties props = new Properties(); @@ -4072,7 +3555,6 @@ public void run() { * guaranteed. This is for testing that feature. */ public void testBasicPersistence() throws Exception { - startVMs(1, 3); createDiskStore(false, 2); final int totalOps = 1000; @@ -4194,7 +3676,7 @@ public void testBasicPersistence() throws Exception { * Check that transaction continues fine after new node join. 
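 * A hedged sketch of the flow this reworked test drives, using only the dunit
 * helpers visible in this patch (negative VM numbers appear to address server
 * VMs; the exact numbering is an assumption):
 *
 *   stopVMNum(-3);        // run with the third server down from the start
 *   // ... open a transaction and insert rows ...
 *   restartVMNums(-3);    // bring the stopped server back mid-transaction
 *   // ... keep inserting on the same connection; the commit must still succeed ...
 *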
*/ public void testNewNodeHA() throws Throwable { - startVMs(1, 2); + stopVMNum(-3); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); st.execute("create schema tran"); @@ -4252,7 +3734,7 @@ public void run() { st.execute("insert into tran.t1 values (50, 50)"); // start a new store VM and TXns should continue - startVMs(0, 1); + restartVMNums(-3); Connection conn2 = TestUtil.getConnection(); Statement st2 = conn2.createStatement(); @@ -4306,9 +3788,6 @@ public void run() { } }); - startClientVMs(1, 0, null); - startServerVMs(1, 0, null); - Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); @@ -4426,7 +3905,7 @@ public void run() { public void testDeltaGII_51366() throws Exception { reduceLogLevelForTest("config"); - startVMs(1, 2); + stopVMNum(-3); Properties props = new Properties(); props.setProperty("sync-commits", "true"); @@ -4465,7 +3944,7 @@ public void testDeltaGII_51366() throws Exception { } // start a new node in the middle of transaction - startVMs(0, 1); + restartVMNums(-3); stmt.execute("call sys.rebalance_all_buckets()"); for (int i = 2 * numBaseInserts; i < 3 * numBaseInserts; i++) { @@ -4580,6 +4059,8 @@ public String description() { stopVMNum(-3); verify_test51366(6 * numBaseInserts); + + restartVMNums(-1, -3); } private void verify_test51366(final int totalInserts) @@ -4683,79 +4164,11 @@ public void createDiskStore(boolean useClient, int vmNum) throws Exception { } } - public static void installObservers() { - final CyclicBarrier testBarrier = new CyclicBarrier(2); - final ConcurrentHashMap waitDone = - new ConcurrentHashMap(2); - - TransactionObserver txOb1 = new TransactionObserverAdapter() { - boolean firstCall = true; - - @Override - public void beforeIndividualLockUpgradeInCommit(TXStateProxy tx, - TXEntryState entry) { - if (this.firstCall) { - this.firstCall = false; - return; - } - if (waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { - SanityManager.DEBUG_PRINT("info:TEST", - "TXObserver: waiting on testBarrier, count=" - + testBarrier.getNumberWaiting()); - try { - testBarrier.await(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - - @Override - public void afterIndividualRollback(TXStateProxy tx, Object callbackArg) { - // release the barrier for the committing TX - if (waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { - try { - testBarrier.await(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - }; - - GemFireXDQueryObserver ob2 = new GemFireXDQueryObserverAdapter() { - @Override - public void lockingRowForTX(TXStateProxy tx, GemFireContainer container, - RegionEntry entry, boolean writeLock) { - if (!writeLock - && ExclusiveSharedSynchronizer.isExclusive(entry.getState()) - && waitDone.putIfAbsent(tx, Boolean.TRUE) == null) { - SanityManager.DEBUG_PRINT("info:TEST", - "GFXDObserver: waiting on testBarrier, count=" - + testBarrier.getNumberWaiting()); - try { - testBarrier.await(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - } - }; - - final TXManagerImpl txMgr = GemFireCacheImpl.getExisting().getTxManager(); - for (TXStateProxy tx : txMgr.getHostedTransactionsInProgress()) { - tx.setObserver(txOb1); - } - txMgr.setObserver(txOb1); - GemFireXDQueryObserverHolder.setInstance(ob2); - } - protected int getIsolationLevel() { return Connection.TRANSACTION_READ_COMMITTED; } public void test49667() throws Exception { - startVMs(1, 2); Connection conn = TestUtil.jdbcConn; clientSQLExecute( 1, @@ -4811,8 +4224,6 @@ public void test49667() 
throws Exception { * @throws Exception */ public void testGFXDDeleteWithConcurrency() throws Exception { - startVMs(0, 2); - startVMs(1, 0); createDiskStore(true, 1); // Create a schema clientSQLExecute(1, "create schema trade"); @@ -4835,7 +4246,7 @@ public void testGFXDDeleteWithConcurrency() throws Exception { } conn.commit(); Statement st = conn.createStatement(); - boolean b = st.execute("delete from trade.customers where cid = 4"); + st.execute("delete from trade.customers where cid = 4"); conn.commit(); expected.remove(4); @@ -4862,7 +4273,6 @@ public void testGFXDDeleteWithConcurrency() throws Exception { Statement s = conn.createStatement(); s.execute("select * from trade.customers"); ResultSet rs = s.getResultSet(); - rs = s.getResultSet(); Map received = new HashMap(); while(rs.next()) { @@ -4870,5 +4280,6 @@ public void testGFXDDeleteWithConcurrency() throws Exception { } assertEquals(expected,received); } + restartVMNums(-1); } } diff --git a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionRRDUnit.java b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionRRDUnit.java index 80239d068..0048e8d7c 100644 --- a/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionRRDUnit.java +++ b/gemfirexd/tools/src/dunit/java/com/pivotal/gemfirexd/transactions/TransactionRRDUnit.java @@ -39,8 +39,6 @@ public TransactionRRDUnit(String name) { */ @Override public void test42822() throws Exception { - startVMs(1, 3); - Connection conn = TestUtil.jdbcConn; conn.setTransactionIsolation(getIsolationLevel()); conn.setAutoCommit(false); @@ -134,7 +132,6 @@ public void run() { * commit before its commit. */ public void testRepeatableRead() throws Exception { - startVMs(1, 3); Connection conn = TestUtil.jdbcConn; Statement st = conn.createStatement(); ResultSet rs; diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/TestUtil.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/TestUtil.java index b97cd3c1f..2782aa851 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/TestUtil.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/TestUtil.java @@ -57,6 +57,7 @@ import com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlGenerator; import com.gemstone.gemfire.internal.cache.xmlcache.RegionAttributesCreation; import com.gemstone.gemfire.internal.cache.xmlcache.RegionCreation; +import com.gemstone.gemfire.internal.shared.ClientSharedUtils; import com.gemstone.gemfire.internal.shared.NativeCalls; import com.gemstone.gemfire.internal.shared.jna.OSType; import com.gemstone.gemfire.internal.shared.StringPrintWriter; @@ -76,6 +77,7 @@ import com.pivotal.gemfirexd.internal.engine.access.index.MemIndexScanController; import com.pivotal.gemfirexd.internal.engine.access.index.SortedMap2IndexScanController; import com.pivotal.gemfirexd.internal.engine.db.FabricDatabase; +import com.pivotal.gemfirexd.internal.engine.ddl.catalog.messages.GfxdSystemProcedureMessage; import com.pivotal.gemfirexd.internal.engine.ddl.resolver.GfxdPartitionResolver; import com.pivotal.gemfirexd.internal.engine.distributed.metadata.SelectQueryInfo; import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils; @@ -225,6 +227,18 @@ public static final void reduceLogLevel(String logLevel) { System.setProperty("DistributionManager.VERBOSE", Boolean.toString(oldDMVerbose)); System.clearProperty("gemfire.log-level"); + logLevel = "config"; + } + try { + if (Misc.getGemFireCacheNoThrow() != null) { + // convert 
logLevel to slf4j name + String level = ClientSharedUtils.convertToLog4LogLevel( + java.util.logging.Level.parse(logLevel.toUpperCase(Locale.ENGLISH))); + GfxdSystemProcedureMessage.SysProcMethod.setLogLevel.processMessage( + new Object[]{"", level}, Misc.getMyId()); + } + } catch (Exception e) { + getLogger().warn("Failed to set log-level " + logLevel, e); } } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/GfxdLocalLockTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/GfxdLocalLockTest.java index 7e1aa9461..a579d73ff 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/GfxdLocalLockTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/GfxdLocalLockTest.java @@ -22,6 +22,7 @@ import com.gemstone.gemfire.cache.Cache; import com.gemstone.gemfire.cache.CacheFactory; +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; import com.gemstone.org.jgroups.oswego.concurrent.WriterPreferenceReadWriteLock; import com.pivotal.gemfirexd.internal.engine.locks.impl.GfxdReentrantReadWriteLock; import com.pivotal.gemfirexd.jdbc.JdbcTestBase; @@ -48,6 +49,12 @@ public GfxdLocalLockTest(String name) { private static final AtomicInteger globalId = new AtomicInteger(0); + @Override + protected void setUp() throws Exception { + GemFireCacheImpl.setGFXDSystemForTests(); + super.setUp(); + } + public void testReadWriteLockWithPerf() throws Exception { final int numReaders = 100; final int numWriters = 10; diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/hadoop/mapred/EventInputFormatTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/hadoop/mapred/EventInputFormatTest.java index 7373dc5fb..85bad1923 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/hadoop/mapred/EventInputFormatTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/engine/hadoop/mapred/EventInputFormatTest.java @@ -505,7 +505,7 @@ private void deleteTest( boolean writeOnly, boolean primaryKey, boolean isTransa FileStatus[] list = null; Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); - for (int i = 0; i < 20; i++) { + for (int i = 0; i < 100; i++) { list = fs.listStatus(new Path(HDFS_DIR + "/APP_MYTAB1/0/")); if (list.length == 1) { break; diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/EvictionByCriteriaTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/EvictionByCriteriaTest.java index 4ed739e4d..f0ddb4ab9 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/EvictionByCriteriaTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/EvictionByCriteriaTest.java @@ -40,54 +40,80 @@ import com.gemstone.gemfire.cache.EvictionAlgorithm; import com.gemstone.gemfire.cache.EvictionAttributes; import com.gemstone.gemfire.cache.Operation; -import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; import com.gemstone.gemfire.internal.cache.CachePerfStats; import com.gemstone.gemfire.internal.cache.EntryEventImpl; +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; import com.gemstone.gemfire.internal.cache.LocalRegion; import com.gemstone.gemfire.internal.cache.RegionEntry; +import com.pivotal.gemfirexd.TestUtil; import com.pivotal.gemfirexd.internal.engine.Misc; import com.pivotal.gemfirexd.internal.engine.ddl.GfxdEvictionCriteria; -import 
com.pivotal.gemfirexd.internal.engine.distributed.GfxdDumpLocalResultMessage; import com.pivotal.gemfirexd.internal.engine.store.CompactCompositeRegionKey; import com.pivotal.gemfirexd.internal.engine.store.GemFireContainer; import com.pivotal.gemfirexd.internal.engine.store.RegionKey; import com.pivotal.gemfirexd.internal.iapi.types.DataValueDescriptor; import com.pivotal.gemfirexd.internal.iapi.types.SQLVarchar; -import com.pivotal.gemfirexd.jdbc.JdbcTestBase; +import com.pivotal.gemfirexd.jdbc.JUnit4TestBase; +import org.apache.derbyTesting.junit.CleanDatabaseTestSetup; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import static com.pivotal.gemfirexd.TestUtil.jdbcConn; +import static org.junit.Assert.*; /** * Unit tests for GemFireXD EVICTION BY CRITERIA. */ -public class EvictionByCriteriaTest extends JdbcTestBase { +@SuppressWarnings("unchecked") +public class EvictionByCriteriaTest extends JUnit4TestBase { + + private static final String HDFS_DIR = "./evictHDFS"; + + protected static Class thisClass = EvictionByCriteriaTest.class; - static final String HDFS_DIR = "./evictHDFS"; + @BeforeClass + public static void createHDFSStore() throws Exception { + TestUtil.shutDown(); + TestUtil.setCurrentTestClass(thisClass); + TestUtil.currentTest = "all"; + TestUtil.setupConnection(); - public EvictionByCriteriaTest(String name) { - super(name); + Connection conn = jdbcConn; + Statement stmt = conn.createStatement(); + stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" + + HDFS_DIR + "' QUEUEPERSISTENT true"); } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); + + @AfterClass + public static void classTearDown() throws Exception { + JUnit4TestBase.classTearDown(); delete(new File(HDFS_DIR)); } - @Override - protected String reduceLogging() { - return "config"; + @After + public void dropTables() throws SQLException { + CleanDatabaseTestSetup.cleanDatabase(jdbcConn, false); + GemFireCacheImpl cache = GemFireCacheImpl.getExisting(); + cache.getCachePerfStats().clearEvictionByCriteriaStatsForTest(); } - private void delete(File file) { + @SuppressWarnings("ResultOfMethodCallIgnored") + private static void delete(File file) { if (!file.exists()) { return; } if (file.isDirectory()) { - if (file.list().length == 0) { + String[] contents = file.list(); + if (contents == null || contents.length == 0) { file.delete(); } else { File[] files = file.listFiles(); - for (File f : files) { - delete(f); + if (files != null) { + for (File f : files) { + delete(f); + } } file.delete(); } @@ -96,22 +122,18 @@ private void delete(File file) { } } + @Test public void testDDLSupport() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - // eviction of data more than 10 seconds old final String evictClause = "{fn TIMESTAMPDIFF(SQL_TSI_SECOND, " + "ts, CURRENT_TIMESTAMP)} > 10"; stmt.execute("create table e.evictTable(" + "id varchar(20) primary key, qty int, ts timestamp" + ") " + getOffHeapSuffix() + " partition by column(id) " - + "persistent hdfsstore (hdfsdata) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria (" + evictClause + ") eviction frequency 8 seconds"); @@ -155,7 +177,7 @@ public void testDDLSupport() throws Exception { assertFalse(criteria.doEvict(event)); } - // sleep for 14 secs + // 
sleep for 12 secs Thread.sleep(12000); // more inserts for (int i = 11; i <= 20; i++) { @@ -207,25 +229,20 @@ public void testDDLSupport() throws Exception { } } + @Test public void testEvictionService_AlterFrequency() throws Exception { - reduceLogLevelForTest("config"); - - setupConnection(); + setLogLevelForTest("config"); Connection conn = jdbcConn; - Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - final String evictClause = "qty > 500"; stmt.execute("create table e.evictTable(" + "id varchar(20) primary key, qty int, ts timestamp" + ") " + getOffHeapSuffix() + " partition by column(id) " - + "persistent hdfsstore (hdfsdata) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria (" + evictClause - + ") eviction frequency 15 seconds"); + + ") eviction frequency 8 seconds"); LocalRegion lr = (LocalRegion)Misc.getRegion("/E/EVICTTABLE", true, false); CustomEvictionAttributes evictionAttrs = lr.getAttributes() @@ -239,7 +256,7 @@ public void testEvictionService_AlterFrequency() throws Exception { assertEquals(EvictionAction.OVERFLOW_TO_DISK, defEvictAttrs.getAction()); assertNotNull(evictionAttrs); assertEquals(0, evictionAttrs.getEvictorStartTime()); - assertEquals(15000, evictionAttrs.getEvictorInterval()); + assertEquals(8000, evictionAttrs.getEvictorInterval()); GfxdEvictionCriteria criteria = (GfxdEvictionCriteria)evictionAttrs .getCriteria(); assertEquals(evictClause, criteria.getPredicateString()); @@ -265,7 +282,7 @@ public void testEvictionService_AlterFrequency() throws Exception { } //allow eviction to happen - Thread.sleep(32000); + Thread.sleep(17000); assertTrue(stmt.execute("select * from e.evictTable")); rs = stmt.getResultSet(); @@ -312,8 +329,8 @@ public void testEvictionService_AlterFrequency() throws Exception { .getCriteria(); assertEquals(evictClause, criteria.getPredicateString()); - //Sleep to allow eviction to happen by previous frequency of 15 sec. So 100(101-200) entries will be evicted - Thread.sleep(30000); + //Sleep to allow eviction to happen by previous frequency of 8 sec. 
So 100(101-200) entries will be evicted + Thread.sleep(17000); //another 100 entries added for (int i = 201; i <= 300; i++) { @@ -333,26 +350,21 @@ public void testEvictionService_AlterFrequency() throws Exception { } assertEquals(50, ids.size()); } - - public void testEvictionService() throws Exception { - reduceLogLevelForTest("config"); - setupConnection(); + @Test + public void testEvictionService() throws Exception { + setLogLevelForTest("config"); Connection conn = jdbcConn; - Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - // eviction of data more than 10 seconds old final String evictClause = "{fn TIMESTAMPDIFF(SQL_TSI_SECOND, " + "ts, CURRENT_TIMESTAMP)} > 10"; stmt.execute("create table e.evictTable(" + "id varchar(20) primary key, qty int, ts timestamp" + ") " + getOffHeapSuffix() + " partition by column(id) " - + "persistent hdfsstore (hdfsdata) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria (" + evictClause + ") eviction frequency 5 seconds"); @@ -395,7 +407,7 @@ public void testEvictionService() throws Exception { Set ids = new HashSet(); while (rs.next()) { int id = rs.getInt("id"); - getLogger().info("The id is " + id); + logger.info("The id is " + id); ids.add(id); } assertEquals(10, ids.size()); @@ -410,16 +422,16 @@ public void testEvictionService() throws Exception { assertEquals(30, lr.getCachePerfStats().getEvaluations()); } } - - public void testBug49900() throws Exception { - setupConnection(); + @Test + public void testBug49900() throws Exception { Connection conn = jdbcConn; Statement st = conn.createStatement(); try { // EVICTION BY CRITERIA for replicated persistent non-HDFS table - st.execute("create table t1 (col1 int) " + getOffHeapSuffix() + " replicate persistent eviction by criteria (col1 > 0) evict incoming"); + st.execute("create table t1 (col1 int) " + getOffHeapSuffix() + + " replicate persistent eviction by criteria (col1 > 0) evict incoming"); fail("EVICTION BY CRITERIA is not supported for non-HDFS tables"); } catch (SQLException e) { @@ -431,7 +443,8 @@ public void testBug49900() throws Exception { try { // EVICTION BY CRITERIA for replicated non-HDFS table - st.execute("create table t2 (col1 int) " + getOffHeapSuffix() + " replicate eviction by criteria (col1 > 0) evict incoming"); + st.execute("create table t2 (col1 int) " + getOffHeapSuffix() + + " replicate eviction by criteria (col1 > 0) evict incoming"); fail("EVICTION BY CRITERIA is not supported for non-HDFS tables"); } catch (SQLException e) { @@ -443,7 +456,9 @@ public void testBug49900() throws Exception { try { // EVICTION BY CRITERIA for partitioned persistent non-HDFS table - st.execute("create table t3 (col1 int) " + getOffHeapSuffix() + " partition by column (col1) persistent eviction by criteria (col1 > 0) evict incoming"); + st.execute("create table t3 (col1 int) " + getOffHeapSuffix() + + " partition by column (col1) persistent " + + "eviction by criteria (col1 > 0) evict incoming"); fail("EVICTION BY CRITERIA is not supported for non-HDFS tables"); } catch (SQLException e) { @@ -455,7 +470,8 @@ public void testBug49900() throws Exception { try { // EVICTION BY CRITERIA for partitioned non-HDFS table - st.execute("create table t4 (col1 int) " + getOffHeapSuffix() + " partition by column (col1) eviction by criteria (col1 > 0) evict incoming"); + st.execute("create table t4 (col1 int) " + getOffHeapSuffix() + + " partition by column (col1) 
eviction by criteria (col1 > 0) evict incoming"); fail("EVICTION BY CRITERIA is not supported for non-HDFS tables"); } catch (SQLException e) { @@ -467,7 +483,9 @@ public void testBug49900() throws Exception { try { // EVICTION BY CRITERIA for partitioned non-HDFS table - st.execute("create table t5 (col1 varchar(20) primary key, col2 int) " + getOffHeapSuffix() + " partition by column (col1) eviction by lrucount 1 evictaction destroy hdfsstore (hstore)"); + st.execute("create table t5 (col1 varchar(20) primary key, col2 int) " + + getOffHeapSuffix() + " partition by column (col1) " + + "eviction by lrucount 1 evictaction destroy hdfsstore (hstore)"); fail("EVICTION action is not supported for HDFS tables"); } catch (SQLException e) { @@ -476,30 +494,24 @@ public void testBug49900() throws Exception { throw e; } } - } + /** * Check by querying the table if everything is right. * @throws Exception */ + @Test public void testEvictionServiceIndex() throws Exception { - - setupConnection(); - Connection conn = jdbcConn; - Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - // eviction of data more than 10 seconds old final String evictClause = "{fn TIMESTAMPDIFF(SQL_TSI_SECOND, " + "ts, CURRENT_TIMESTAMP)} > 10"; stmt.execute("create table e.evictTable(" + "id varchar(20) primary key, qty int, ts timestamp" + ") " + getOffHeapSuffix() + " partition by column(id) " - + "persistent hdfsstore (hdfsdata) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria (" + evictClause + ") eviction frequency 5 seconds"); @@ -586,25 +598,21 @@ public void testEvictionServiceIndex() throws Exception { // TODO: This validation may change //assertEquals(30, lr.getCachePerfStats().getEvaluations()); } - - public void testEvictIncomingDDLSupport() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingDDLSupport() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - // eviction of data more than 10 seconds old final String evictClause = "qty > 100"; stmt.execute("create table e.evictTable(" + "id varchar(20) primary key, qty int, ts timestamp" - + ") " + getOffHeapSuffix() + " partition by column(id) " + "persistent hdfsstore (hdfsdata) " + + ") " + getOffHeapSuffix() + " partition by column(id) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria (" + evictClause + ") EVICT INCOMING "); LocalRegion lr = (LocalRegion)Misc.getRegion("/E/EVICTTABLE", true, false); - GemFireContainer container = (GemFireContainer)lr.getUserAttribute(); CustomEvictionAttributes evictionAttrs = lr.getAttributes() .getCustomEvictionAttributes(); EvictionAttributes defEvictAttrs = lr.getAttributes() @@ -632,17 +640,15 @@ public void testEvictIncomingDDLSupport() throws Exception { assertEquals(0, stats.getEvictionsInProgress()); assertEquals(20, stats.getEvaluations()); } - - public void testEvictIncomingQueryHDFS() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingQueryHDFS() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, qty int, abc int ) " + getOffHeapSuffix() + " 
partition by column(id) persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, qty int, abc int ) " + + getOffHeapSuffix() + " partition by column(id) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); stmt.execute("create index idx on e.evictTable (qty, abc)"); @@ -710,20 +716,15 @@ public void testEvictIncomingQueryHDFS() throws Exception { assertEquals(0, ids.size()); } - - /** - * Uncomment this test once defect #49965 is fixed. - */ - public void testEvictIncomingWithPartitionKey() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingWithPartitionKey() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, partkey int not null ) " + getOffHeapSuffix() + " partition by column(partkey) persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, partkey int not null) " + + getOffHeapSuffix() + " partition by column(partkey) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( id > 10 ) EVICT INCOMING "); // some inserts @@ -740,12 +741,11 @@ public void testEvictIncomingWithPartitionKey() throws Exception { stmt.executeUpdate("insert into e.evictTable values (30, 201 )"); stmt.executeUpdate("delete from e.evictTable where id=30"); - - - //Bounce the system. - shutDown(); - - //Delete the krf to force recovery from the crf + + // Bounce the system. + TestUtil.shutDown(); + + // Delete the krf to force recovery from the crf String currDir = System.getProperty("user.dir"); File cdir = new File(currDir); String[] files = cdir.list(); @@ -757,20 +757,20 @@ public void testEvictIncomingWithPartitionKey() throws Exception { } } } - - setupConnection(); + + // restart + TestUtil.setupConnection(); } - - public void testEvictIncomingWithUniqueIndex() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingWithUniqueIndex() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, partkey int not null, qty int, constraint uq unique (partkey, qty) ) " + getOffHeapSuffix() + " partition by column(partkey) persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, " + + "partkey int not null, qty int, constraint uq unique (partkey, qty)) " + + getOffHeapSuffix() + " partition by column(partkey) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); // some inserts @@ -788,19 +788,20 @@ public void testEvictIncomingWithUniqueIndex() throws Exception { } } + @Test public void testEvictIncomingWithUniqueIndexDelete() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, partkey int not null, qty int, constraint uq unique (partkey, qty) ) " + getOffHeapSuffix() + " partition by column(partkey) persistent hdfsstore (hdfsdata) " + stmt.execute("create table 
e.evictTable( id int primary key, " + + "partkey int not null, qty int, constraint uq unique (partkey, qty)) " + + getOffHeapSuffix() + " partition by column(partkey) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); - stmt.execute("create table e.simpleTable( id int primary key, partkey int not null, qty int, constraint uq2 unique (partkey, qty) ) " + getOffHeapSuffix() + " partition by column(partkey) " ); + stmt.execute("create table e.simpleTable( id int primary key, " + + "partkey int not null, qty int, constraint uq2 unique (partkey, qty)) " + + getOffHeapSuffix() + " partition by column(partkey)"); // some inserts for (int i = 20; i <= 20; i++) { @@ -835,6 +836,7 @@ public void testEvictIncomingWithUniqueIndexDelete() throws Exception { stmt.executeUpdate("insert into e.simpleTable values (21, 200, 2000)"); stmt.executeUpdate("insert into e.evictTable values (21, 200, 2000)"); } + /** * Table with unique constraint * Insert one row @@ -843,19 +845,20 @@ public void testEvictIncomingWithUniqueIndexDelete() throws Exception { * try inserting a row with same unique value again * @throws Exception */ + @Test public void testEvictIncomingWithUniqueIndexUpdateDelete() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, partkey int not null, qty int, constraint uq unique (partkey, qty) ) " + getOffHeapSuffix() + " partition by column(partkey) persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, " + + "partkey int not null, qty int, constraint uq unique (partkey, qty)) " + + getOffHeapSuffix() + " partition by column(partkey) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); - stmt.execute("create table e.simpleTable( id int primary key, partkey int not null, qty int, constraint uq2 unique (partkey, qty) ) " + getOffHeapSuffix() + " partition by column(partkey) " ); + stmt.execute("create table e.simpleTable( id int primary key, " + + "partkey int not null, qty int, constraint uq2 unique (partkey, qty)) " + + getOffHeapSuffix() + " partition by column(partkey)"); int i = 20; stmt.executeUpdate("insert into e.evictTable values (" + i + ", " + (i * 10) +", " + (i * 100) + ")"); @@ -885,16 +888,17 @@ public void testEvictIncomingWithUniqueIndexUpdateDelete() throws Exception { * insert the row with same unique keys again. 
it should succeed * @throws Exception */ + @Test public void testEvictIncomingWithUniqueIndexUpdateDelete2() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.portfolio (cid int not null, sid int not null, qty int not null, availQty int not null, subTotal decimal(30,20), tid int, constraint portf_pk primary key (cid, sid), constraint qty_ck check (qty>=0), constraint avail_ch check (availQty>=0 and availQty<=qty)) " + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.portfolio (cid int not null, " + + "sid int not null, qty int not null, availQty int not null, " + + "subTotal decimal(30,20), tid int, constraint portf_pk " + + "primary key (cid, sid), constraint qty_ck check (qty>=0), " + + "constraint avail_ch check (availQty>=0 and availQty<=qty)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "EVICTION BY CRITERIA ( qty > 500 ) EVICT INCOMING "); stmt.executeUpdate("insert into trade.portfolio values(3, 120, 1592, 1592, 14264.32000000000000000000 ,18)"); @@ -911,21 +915,20 @@ public void testEvictIncomingWithUniqueIndexUpdateDelete2() throws Exception { assertFalse(rs.next()); stmt.executeUpdate("insert into trade.portfolio values(3, 120, 1374, 1374, 12311.04000000000000000000, 8)"); } - - - public void testEvictIncomingWithUniqueIndexUpdate() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingWithUniqueIndexUpdate() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, partkey int not null, qty int, constraint uq unique (partkey, qty) ) " + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, " + + "partkey int not null, qty int, constraint uq unique (partkey, qty)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); - stmt.execute("create table e.simpleTable( id int primary key, partkey int not null, qty int, constraint uq2 unique (partkey, qty) ) " + getOffHeapSuffix() + " " ); + stmt.execute("create table e.simpleTable( id int primary key, " + + "partkey int not null, qty int, constraint uq2 unique (partkey, qty)) " + + getOffHeapSuffix()); // some inserts for (int i = 20; i <= 20; i++) { @@ -952,16 +955,17 @@ public void testEvictIncomingWithUniqueIndexUpdate() throws Exception { * 3. 
Validate there is no data in operational * @throws Exception */ + @Test public void testEvictIncomingWithUniqueIndexUpdate2() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.portfolio (cid int not null, sid int not null, qty int not null, availQty int not null, subTotal decimal(30,20), tid int, constraint portf_pk primary key (cid, sid), constraint qty_ck check (qty>=0), constraint avail_ch check (availQty>=0 and availQty<=qty)) " + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.portfolio (cid int not null, " + + "sid int not null, qty int not null, availQty int not null, " + + "subTotal decimal(30,20), tid int, constraint portf_pk " + + "primary key (cid, sid), constraint qty_ck check (qty>=0), " + + "constraint avail_ch check (availQty>=0 and availQty<=qty)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "EVICTION BY CRITERIA ( qty > 500 ) EVICT INCOMING "); // some inserts @@ -1031,31 +1035,34 @@ public void testEvictIncomingWithUniqueIndexUpdate2() throws Exception { * For cheetah, foreign key constraint won't be supported with custom eviction to HDFS. * Defect # 49367/49452. */ + @Test public void testEvictIncomingWithForeignKey() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.customers (cid int not null, cust_name int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cid > 5 ) EVICT INCOMING "); String expectedMessage = "Foreign key constraint is not supported with custom eviction criteria for HDFS tables."; try { - stmt.execute("create table trade.networth (netid int not null, cid int not null, cash decimal (30, 20), constraint netw_pk primary key (netid), constraint cust_newt_fk foreign key (cid) references trade.customers (cid) on delete restrict) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.networth (netid int not null, " + + "cid int not null, cash decimal (30, 20), constraint netw_pk " + + "primary key (netid), constraint cust_newt_fk foreign key (cid) " + + "references trade.customers (cid) on delete restrict) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cash > 1000 ) EVICT INCOMING "); fail("Expected SQLFeatureNotSupportedException as FK is not supported with custom eviction to HDFS."); } catch (SQLFeatureNotSupportedException e) { assertTrue(e.getMessage().equals(expectedMessage)); } - stmt.execute("create table trade.networth (netid int not null, cid int not null, cash decimal (30, 20), constraint netw_pk primary key (netid), constraint cust_newt_fk foreign key (cid) references trade.customers (cid) on delete restrict) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) "); + stmt.execute("create table trade.networth (netid int not null, " + + "cid int not null, cash decimal (30, 20), constraint netw_pk " + + 
"primary key (netid), constraint cust_newt_fk foreign key (cid) " + + "references trade.customers (cid) on delete restrict) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5"); } @@ -1065,17 +1072,14 @@ public void testEvictIncomingWithForeignKey() throws Exception { * Update the second non-operational such that it remains non-operational * @throws Exception */ + @Test public void testEvictIncomingWithLocalIndexes() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.customers (cid int not null, cust_name int, addr int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, addr int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cust_name > 5 ) EVICT INCOMING "); // index on cust_name and addr @@ -1217,17 +1221,14 @@ public void testEvictIncomingWithLocalIndexes() throws Exception { * * @throws Exception */ + @Test public void testEvictIncomingWithLocalIndexesMultipleOperations() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.customers (cid int not null, cust_name int, addr int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, addr int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cust_name > 5 ) EVICT INCOMING "); // index on cust_name and addr @@ -1322,18 +1323,18 @@ public void testEvictIncomingWithLocalIndexesMultipleOperations() throws Excepti LocalRegion lr = (LocalRegion)Misc.getRegion("/TRADE/CUSTOMERS", true, false); assertEquals(0, lr.size()); } - - public void testEvictIncomingWithLocalIndexesFailedOperation() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingWithLocalIndexesFailedOperation() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.portfolio (cid int not null, sid int not null, qty int not null, availQty int not null, constraint portf_pk primary key (cid, sid), constraint qty_ck check (qty>=0), constraint avail_ch check (availQty>=0 and availQty<=qty)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.portfolio (cid int not null, " + + "sid int not null, qty int not null, availQty int not null, " + + "constraint portf_pk primary key (cid, sid), " + + "constraint qty_ck check (qty>=0), " + + "constraint avail_ch check (availQty>=0 and availQty<=qty)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); // some inserts @@ -1363,14 +1364,14 @@ public void testEvictIncomingWithLocalIndexesFailedOperation() throws Exception LocalRegion lr = (LocalRegion)Misc.getRegion("/TRADE/PORTFOLIO", true, false); 
//assertEquals(0, lr.size()); } - - - public void testEvictIncomingWithTrigger() throws Exception { - setupConnection(); + @Test + public void testEvictIncomingWithTrigger() throws Exception { Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create table e.evictTable_history( id int primary key, qty int, abc int ) " + getOffHeapSuffix() + " "); + + stmt.execute("create table e.evictTable_history( id int primary key, " + + "qty int, abc int ) " + getOffHeapSuffix()); String insertStmt = "INSERT INTO e.evictTable_history VALUES ( NEWROW.id , NEWROW.qty , NEWROW.abc )"; String delStmt = "DELETE FROM e.evictTable_history WHERE id=OLDROW.id"; @@ -1380,10 +1381,9 @@ public void testEvictIncomingWithTrigger() throws Exception { String updateTriggerStmt = "CREATE TRIGGER e.evictTable_DELETEFORUPDATE AFTER UPDATE ON e.evictTable REFERENCING NEW AS NEWROW OLD AS OLDROW FOR EACH ROW " + delStmt; String updateTriggerStmt1 = "CREATE TRIGGER e.evictTable_INSERTFORUPDATE AFTER UPDATE ON e.evictTable REFERENCING NEW AS NEWROW OLD AS OLDROW FOR EACH ROW " + insertStmt; - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table e.evictTable( id int primary key, qty int, abc int ) " + getOffHeapSuffix() + " partition by column(id) persistent hdfsstore (hdfsdata) " + stmt.execute("create table e.evictTable( id int primary key, qty int, abc int ) " + + getOffHeapSuffix() + " partition by column(id) " + + "persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( qty > 100 ) EVICT INCOMING "); stmt.execute(insertTriggerStmt); @@ -1426,21 +1426,22 @@ public void testEvictIncomingWithTrigger() throws Exception { * Insert on child table should not throw SQLIntegrityException though parent row is * evicted. */ + @Test public void testEvictIncomingWithForeignKey2() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.customers (cid int not null, cust_name int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cid > 5 ) EVICT INCOMING "); - stmt.execute("create table trade.networth (netid int not null, cid int not null, cash decimal (30, 20), constraint netw_pk primary key (netid), constraint cust_newt_fk foreign key (cid) references trade.customers (cid) on delete restrict) " + getOffHeapSuffix() + " " - + " persistent hdfsstore (hdfsdata) "); + stmt.execute("create table trade.networth (netid int not null, " + + "cid int not null, cash decimal (30, 20), " + + "constraint netw_pk primary key (netid), " + + "constraint cust_newt_fk foreign key (cid) references " + + "trade.customers (cid) on delete restrict) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5"); // some inserts stmt.executeUpdate("insert into trade.customers values (" + 12 + ", " + (12 * 100) + ")"); @@ -1470,23 +1471,21 @@ public void testEvictIncomingWithForeignKey2() throws Exception { } /** - * Eviction already defined on HDFS table. Alter the table to add a foreign key constraint. + * Eviction already defined on HDFS table. 
+ * Alter the table to add a foreign key constraint. */ + @Test public void testEvictIncomingWithAlterTableAddForeignKeyConstraint() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - - stmt.execute("create table trade.customers (cid int not null, cust_name int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cid > 5 ) EVICT INCOMING "); stmt.execute("create table trade.networth (netid int not null, cid int not null, cash decimal (30, 20), constraint netw_pk primary key (netid)) " + getOffHeapSuffix() + " " - + " persistent hdfsstore (hdfsdata)" + + " persistent hdfsstore (hdfsdata) buckets 5" + " eviction by criteria ( cash > 1000 ) EVICT INCOMING "); try { @@ -1501,42 +1500,27 @@ public void testEvictIncomingWithAlterTableAddForeignKeyConstraint() throws Exce * No eviction criteria defined on HDFS table. * Alter the table to add a foreign key constraint. */ + @Test public void testEvictIncomingWithAlterTableAddForeignKeyConstraint_2() throws Exception { - setupConnection(); - Connection conn = jdbcConn; Statement stmt = conn.createStatement(); - - stmt.execute("create hdfsstore hdfsdata namenode 'localhost' homedir '" - + HDFS_DIR + "' QUEUEPERSISTENT true"); - stmt.execute("create table trade.customers (cid int not null, cust_name int, primary key (cid)) " + getOffHeapSuffix() + " " - + "persistent hdfsstore (hdfsdata) " + stmt.execute("create table trade.customers (cid int not null, " + + "cust_name int, primary key (cid)) " + + getOffHeapSuffix() + " persistent hdfsstore (hdfsdata) buckets 5 " + "eviction by criteria ( cid > 5 ) EVICT INCOMING "); - stmt.execute("create table trade.networth (netid int not null, cid int not null, cash decimal (30, 20), constraint netw_pk primary key (netid)) " + getOffHeapSuffix() + " " - + " persistent hdfsstore (hdfsdata)"); + stmt.execute("create table trade.networth (netid int not null, " + + "cid int not null, cash decimal (30, 20), constraint netw_pk " + + "primary key (netid)) " + getOffHeapSuffix() + + " persistent hdfsstore (hdfsdata) buckets 5"); stmt.execute("alter table trade.networth add constraint " + "cust_newt_fk foreign key (cid) references trade.customers (cid)"); } - - public static void dumpAll() { - try { - GfxdDumpLocalResultMessage msg = new GfxdDumpLocalResultMessage(); - InternalDistributedSystem sys = InternalDistributedSystem - .getConnectedInstance(); - msg.send(sys, null); - msg.executeLocally(sys.getDistributionManager(), false); - GfxdDumpLocalResultMessage.sendBucketInfoDumpMsg(null, false); - } catch (Throwable t) { - throw new RuntimeException(t.getMessage()); - } - } - + protected String getOffHeapSuffix() { return " "; } - } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/offheap/OffHeapEvictionByCriteriaTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/offheap/OffHeapEvictionByCriteriaTest.java index f15b8eeb2..0d4c806f8 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/offheap/OffHeapEvictionByCriteriaTest.java +++ 
b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/internal/hadoop/offheap/OffHeapEvictionByCriteriaTest.java @@ -19,29 +19,28 @@ import com.gemstone.gemfire.distributed.internal.DistributionConfig; import com.pivotal.gemfirexd.internal.engine.management.GfxdManagementService; import com.pivotal.gemfirexd.internal.hadoop.EvictionByCriteriaTest; +import org.junit.AfterClass; +import org.junit.BeforeClass; -public class OffHeapEvictionByCriteriaTest extends EvictionByCriteriaTest{ - - public OffHeapEvictionByCriteriaTest(String name) { - super(name); - } - - @Override - public void setUp() throws Exception { - super.setUp(); +public class OffHeapEvictionByCriteriaTest extends EvictionByCriteriaTest { + + @BeforeClass + public static void createHDFSStore() throws Exception { + thisClass = OffHeapEvictionByCriteriaTest.class; System.setProperty("gemfire.OFF_HEAP_TOTAL_SIZE", "500m"); - System.setProperty("gemfire."+DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "500m"); - System.setProperty(GfxdManagementService.DISABLE_MANAGEMENT_PROPERTY,"true"); + System.setProperty("gemfire." + DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "500m"); + System.setProperty(GfxdManagementService.DISABLE_MANAGEMENT_PROPERTY, "true"); + EvictionByCriteriaTest.createHDFSStore(); } - - @Override - public void tearDown() throws Exception { - super.tearDown(); - System.setProperty("gemfire.OFF_HEAP_TOTAL_SIZE", ""); - System.setProperty("gemfire."+DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, ""); - System.setProperty(GfxdManagementService.DISABLE_MANAGEMENT_PROPERTY,""); + + @AfterClass + public static void classTearDown() throws Exception { + EvictionByCriteriaTest.classTearDown(); + System.clearProperty("gemfire.OFF_HEAP_TOTAL_SIZE"); + System.clearProperty("gemfire." + DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME); + System.clearProperty(GfxdManagementService.DISABLE_MANAGEMENT_PROPERTY); } - + @Override protected String getOffHeapSuffix() { return " offheap "; diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobSetMethodsTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobSetMethodsTest.java index 9f8d17176..6199e40cf 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobSetMethodsTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobSetMethodsTest.java @@ -237,12 +237,7 @@ public void testGetBinaryStreamLong_42711() throws Exception { assertTrue(Arrays.equals(bytes1, bytes2)); assertFalse(rs.next()); } - - public void testDummy() { - - } - - + public void waitTillAllClear() { } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobTest.java index c904f4ed3..560a89a3b 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/BlobTest.java @@ -94,7 +94,7 @@ public void testBug43623() throws Exception { final Statement stmt = conn.createStatement(); stmt.execute(tableDDL); - final int numThreads = 50; + final int numThreads = 20; Thread[] ts = new Thread[numThreads]; final Exception[] failure = new Exception[1]; final CyclicBarrier barrier = new CyclicBarrier(numThreads); diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableAsQueryExpressionTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableAsQueryExpressionTest.java index 7b02bf0c0..4e13c31b7 100644 --- 
a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableAsQueryExpressionTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableAsQueryExpressionTest.java @@ -338,8 +338,4 @@ public void testInvalidColumnGivenAsPartitioningColumn() throws SQLException, } assertTrue(gotException); } - - public void testDummy() { - - } } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableTest.java index b9e68b809..48504b216 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/CreateTableTest.java @@ -2940,7 +2940,7 @@ public void testSerializedException_43160() throws Exception { se.getSQLState(), se_ser); SqlExceptionTest.assertSQLExceptionEquals(se, se_ser); } finally { - stmt.execute("delete from tableWithPK where 1=1"); + stmt.execute("delete from tableWithPK"); if (i == 2) { stopNetServer(); } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/JUnit4TestBase.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/JUnit4TestBase.java index ab9737dfc..6555c724c 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/JUnit4TestBase.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/JUnit4TestBase.java @@ -22,16 +22,17 @@ import java.sql.SQLException; import java.util.Properties; +import com.pivotal.gemfirexd.TestUtil; +import com.pivotal.gemfirexd.internal.engine.store.GemFireStore; import org.apache.derbyTesting.junit.CleanDatabaseTestSetup; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.Rule; import org.junit.rules.TestName; -import com.pivotal.gemfirexd.TestUtil; -import com.pivotal.gemfirexd.internal.engine.store.GemFireStore; - /** * Base class for JUnit 4 tests that want to do common setup for boot properties * like logs, stats etc while continuing to use TestUtil. 
@@ -41,8 +42,10 @@ public class JUnit4TestBase { @Rule public TestName name = new TestName(); + protected final Logger logger = LogManager.getLogger(getClass()); + @AfterClass - public static void classTearDown() throws SQLException { + public static void classTearDown() throws Exception { TestUtil.setCurrentTestClass(null); TestUtil.currentTest = null; // cleanup all tables @@ -52,9 +55,16 @@ public static void classTearDown() throws SQLException { props.setProperty("user", TestUtil.bootUserName); props.setProperty("password", TestUtil.bootUserPassword); } - Connection conn = DriverManager.getConnection(TestUtil.getProtocol(), - props); - CleanDatabaseTestSetup.cleanDatabase(conn, false); + try { + Connection conn = DriverManager.getConnection( + TestUtil.getProtocol(), props); + CleanDatabaseTestSetup.cleanDatabase(conn, false); + } catch (SQLException ignored) { + } + } + try { + TestUtil.shutDown(); + } catch (SQLException ignored) { } } diff --git a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/offheap/OffHeapOHACOptimizationTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/offheap/OffHeapOHACOptimizationTest.java index 734b6fe44..755b95778 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/offheap/OffHeapOHACOptimizationTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/jdbc/offheap/OffHeapOHACOptimizationTest.java @@ -18,10 +18,8 @@ import java.lang.reflect.Field; import java.sql.Connection; -import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.SQLException; import java.sql.SQLWarning; import java.sql.Statement; import java.sql.Timestamp; @@ -52,7 +50,6 @@ import com.gemstone.gemfire.internal.cache.TXState; import com.gemstone.gemfire.internal.offheap.SimpleMemoryAllocatorImpl.Chunk; import com.pivotal.gemfirexd.TestUtil; -import com.pivotal.gemfirexd.TestUtil.ScanTypeQueryObserver; import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverAdapter; import com.pivotal.gemfirexd.internal.engine.GemFireXDQueryObserverHolder; import com.pivotal.gemfirexd.internal.engine.Misc; @@ -78,7 +75,6 @@ import com.pivotal.gemfirexd.internal.impl.sql.execute.ResultSetStatisticsVisitor; import com.pivotal.gemfirexd.internal.impl.sql.execute.SortResultSet; import com.pivotal.gemfirexd.jdbc.JdbcTestBase; -import com.pivotal.gemfirexd.jdbc.JdbcTestBase.RegionMapClearDetector; public class OffHeapOHACOptimizationTest extends JdbcTestBase { private RegionMapClearDetector rmcd = null; @@ -249,14 +245,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -386,14 +374,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -517,14 +497,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -649,14 +621,6 @@ public void onSortResultSetOpen( } } 
finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -786,14 +750,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -917,14 +873,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1050,14 +998,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1191,14 +1131,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1338,14 +1270,6 @@ public void onSortResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1477,14 +1401,6 @@ public void onGroupedAggregateResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1622,14 +1538,6 @@ public void onGroupedAggregateResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1769,14 +1677,6 @@ public void onGroupedAggregateResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -1909,14 +1809,6 @@ public void onGroupedAggregateResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } @@ -2056,14 +1948,6 @@ public void onGroupedAggregateResultSetOpen( } } finally { - try { - DriverManager.getConnection("jdbc:derby:;shutdown=true"); - } catch (SQLException sqle) { - if (sqle.getMessage().indexOf("shutdown") == -1) { - sqle.printStackTrace(); - throw sqle; - } - } GemFireXDQueryObserverHolder.clearInstance(); } diff --git 
a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/security/LdapGroupAuthTest.java b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/security/LdapGroupAuthTest.java index 056df2de8..18f1b11b8 100644 --- a/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/security/LdapGroupAuthTest.java +++ b/gemfirexd/tools/src/test/java/com/pivotal/gemfirexd/security/LdapGroupAuthTest.java @@ -21,21 +21,7 @@ import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.ListIterator; -import java.util.Properties; -import java.util.Set; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import java.util.*; import com.gemstone.gemfire.internal.SocketCreator; import com.gemstone.gemfire.internal.util.ArrayUtils; @@ -60,6 +46,10 @@ import com.pivotal.gemfirexd.internal.impl.sql.catalog.TabInfoImpl; import com.pivotal.gemfirexd.internal.impl.sql.execute.GranteeIterator; import com.pivotal.gemfirexd.jdbc.JUnit4TestBase; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; public class LdapGroupAuthTest extends JUnit4TestBase { @@ -85,10 +75,9 @@ public static void startServer() throws Exception { } @AfterClass - public static void stopServer() throws Exception { + public static void classTearDown() throws Exception { SQLException failure = closeStatements(conns, stmts); - classTearDown(); - TestUtil.shutDown(); + JUnit4TestBase.classTearDown(); netPort = 0; final LdapTestServer server = LdapTestServer.getInstance(); if (server.isServerStarted()) { @@ -152,7 +141,7 @@ public void ldapGroupMembers() throws Exception { */ @Test public void ldapADGroupMembers() throws Exception { - stopServer(); + classTearDown(); Properties bootProperties = SecurityTestUtils .startLdapServerAndGetBootProperties(0, 0, sysUser, TestUtil.getResourcesDir() + "/lib/ldap/authAD.ldif"); @@ -191,7 +180,7 @@ public void ldapADGroupMembers() throws Exception { Set group7Members = getLdapGroupMembers("gemgroup7"); Assert.assertEquals(expectedGroup7Members, group7Members); - stopServer(); + classTearDown(); startServer(); } diff --git a/gemfirexd/tools/src/testing/java/org/apache/derbyTesting/junit/JDBC.java b/gemfirexd/tools/src/testing/java/org/apache/derbyTesting/junit/JDBC.java index 8b960bcd3..1ad3ff096 100644 --- a/gemfirexd/tools/src/testing/java/org/apache/derbyTesting/junit/JDBC.java +++ b/gemfirexd/tools/src/testing/java/org/apache/derbyTesting/junit/JDBC.java @@ -20,6 +20,7 @@ package org.apache.derbyTesting.junit; // GemStone changes BEGIN +import com.gemstone.gemfire.cache.PartitionAttributes; import com.gemstone.gemfire.cache.Region; import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem; import com.gemstone.gemfire.internal.cache.CacheObserverAdapter; @@ -467,11 +468,24 @@ private static void dropUsingDMD( // First collect the set of DROP SQL statements. 
ArrayList ddl = new ArrayList(); + GemFireCacheImpl cache = GemFireCacheImpl.getInstance(); while (rs.next()) { String objectName = rs.getString(mdColumn); String raw = dropLeadIn + JDBC.escape(schema, objectName); if ( "TYPE".equals( dropType ) ) { raw = raw + " restrict "; } + // move child tables at the start + if (cache != null) { + Region region = Misc.getRegionByPath( + Misc.getRegionPath(schema, objectName, null), false); + PartitionAttributes pattrs; + if (region != null && + (pattrs = region.getAttributes().getPartitionAttributes()) != null && + pattrs.getColocatedWith() != null) { + ddl.add(0, raw); + continue; + } + } ddl.add( raw ); } rs.close(); diff --git a/gradle.properties b/gradle.properties index 01b1dd9b8..44ee79275 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ org.gradle.daemon = false -#org.gradle.jvmargs = -Xmx2g -XX:MaxPermSize=350m +#org.gradle.parallel=true # Set this on the command line with -P or in ~/.gradle/gradle.properties # to change the buildDir location. Use an absolute path. diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 5ccda13e9..f808147c2 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index e8b130d69..42deefabf 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ -#Sun May 08 20:12:34 IST 2016 +#Wed Sep 13 23:36:27 IST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-2.11-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-3.5.1-all.zip diff --git a/gradlew b/gradlew index 9cc321151..8f0616712 100755 --- a/gradlew +++ b/gradlew @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh ############################################################################## ## @@ -6,12 +6,30 @@ ## ############################################################################## -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=512m" +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true" + # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" @@ -30,6 +48,7 @@ die ( ) { cygwin=false msys=false darwin=false +nonstop=false case "`uname`" in CYGWIN* ) cygwin=true @@ -40,26 +59,11 @@ case "`uname`" in MINGW* ) msys=true ;; + NONSTOP* ) + nonstop=true + ;; esac -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. 
-while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. @@ -85,7 +89,7 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then @@ -150,11 +154,19 @@ if $cygwin ; then esac fi -# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules -function splitJvmOpts() { - JVM_OPTS=("$@") +# Escape application args +save ( ) { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " } -eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS -JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi -exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat index 35421804f..78bf60415 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -8,14 +8,14 @@ @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS=-Xmx2g -XX:ReservedCodeCacheSize=512m - set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS=-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true + @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome @@ -49,7 +49,6 @@ goto fail @rem Get command-line arguments, handling Windows variants if not "%OS%" == "Windows_NT" goto win9xME_args -if "%@eval[2+2]" == "4" goto 4NT_args :win9xME_args @rem Slurp the command line arguments. 
@@ -60,11 +59,6 @@ set _SKIP=2 if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* -goto execute - -:4NT_args -@rem Get arguments from the 4NT Shell from JP Software -set CMD_LINE_ARGS=%$ :execute @rem Setup the command line diff --git a/lgpl/gemfirexd-hibernate/build.gradle b/lgpl/gemfirexd-hibernate/build.gradle index a3c2433d3..4c73eef50 100644 --- a/lgpl/gemfirexd-hibernate/build.gradle +++ b/lgpl/gemfirexd-hibernate/build.gradle @@ -1,6 +1,6 @@ dependencies { - provided project(subprojectBase + 'snappydata-store-core') - provided 'org.hibernate:hibernate-core:4.3.11.Final' + compileOnly project(subprojectBase + 'snappydata-store-core') + compileOnly 'org.hibernate:hibernate-core:4.3.11.Final' } if (rootProject.hasProperty('enablePublish')) { diff --git a/native/build.gradle b/native/build.gradle index 59ef61e18..6737bc8f1 100644 --- a/native/build.gradle +++ b/native/build.gradle @@ -16,7 +16,7 @@ */ plugins { - id 'de.undercouch.download' version '3.0.0' + id 'de.undercouch.download' version '3.2.0' } apply plugin: 'wrapper' diff --git a/native/gradle/wrapper/gradle-wrapper.jar b/native/gradle/wrapper/gradle-wrapper.jar index 5ccda13e9..f808147c2 100644 Binary files a/native/gradle/wrapper/gradle-wrapper.jar and b/native/gradle/wrapper/gradle-wrapper.jar differ diff --git a/native/gradle/wrapper/gradle-wrapper.properties b/native/gradle/wrapper/gradle-wrapper.properties index 8ef411dad..42deefabf 100644 --- a/native/gradle/wrapper/gradle-wrapper.properties +++ b/native/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ -#Wed Mar 09 16:47:54 IST 2016 +#Wed Sep 13 23:36:27 IST 2017 distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-2.13-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-3.5.1-all.zip diff --git a/native/gradlew b/native/gradlew index 9d82f7891..8f0616712 100755 --- a/native/gradlew +++ b/native/gradlew @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh ############################################################################## ## @@ -6,12 +6,30 @@ ## ############################################################################## -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS="" +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true" + # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" @@ -30,6 +48,7 @@ die ( ) { cygwin=false msys=false darwin=false +nonstop=false case "`uname`" in CYGWIN* ) cygwin=true @@ -40,26 +59,11 @@ case "`uname`" in MINGW* ) msys=true ;; + NONSTOP* ) + nonstop=true + ;; esac -# Attempt to set APP_HOME -# Resolve links: $0 may be a link -PRG="$0" -# Need this for relative symlinks. 
-while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG=`dirname "$PRG"`"/$link" - fi -done -SAVED="`pwd`" -cd "`dirname \"$PRG\"`/" >/dev/null -APP_HOME="`pwd -P`" -cd "$SAVED" >/dev/null - CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar # Determine the Java command to use to start the JVM. @@ -85,7 +89,7 @@ location of your Java installation." fi # Increase the maximum file descriptors if we can. -if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then MAX_FD_LIMIT=`ulimit -H -n` if [ $? -eq 0 ] ; then if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then @@ -150,11 +154,19 @@ if $cygwin ; then esac fi -# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules -function splitJvmOpts() { - JVM_OPTS=("$@") +# Escape application args +save ( ) { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " } -eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS -JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME" +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi -exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@" +exec "$JAVACMD" "$@" diff --git a/native/gradlew.bat b/native/gradlew.bat index 72d362daf..78bf60415 100644 --- a/native/gradlew.bat +++ b/native/gradlew.bat @@ -8,14 +8,14 @@ @rem Set local scope for the variables with windows NT shell if "%OS%"=="Windows_NT" setlocal -@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - set DIRNAME=%~dp0 if "%DIRNAME%" == "" set DIRNAME=. set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS=-Xmx2g -XX:ReservedCodeCacheSize=512m -Djava.net.preferIPv4Stack=true + @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome @@ -49,7 +49,6 @@ goto fail @rem Get command-line arguments, handling Windows variants if not "%OS%" == "Windows_NT" goto win9xME_args -if "%@eval[2+2]" == "4" goto 4NT_args :win9xME_args @rem Slurp the command line arguments. 
@@ -60,11 +59,6 @@ set _SKIP=2 if "x%~1" == "x" goto execute set CMD_LINE_ARGS=%* -goto execute - -:4NT_args -@rem Get arguments from the 4NT Shell from JP Software -set CMD_LINE_ARGS=%$ :execute @rem Setup the command line diff --git a/settings.gradle b/settings.gradle index 597210369..2926aede4 100644 --- a/settings.gradle +++ b/settings.gradle @@ -19,12 +19,11 @@ rootProject.name = 'snappy-store' include ':gemfire-jgroups' include ':gemfire-trove' -include 'gemfire-joptsimple' -include 'gemfire-json' +include 'gemfire-util' include 'gemfire-shared' include 'gemfire-core' include 'gemfire-junit' -include 'gemfire-web' +// include 'gemfire-web' include 'gemfire-examples' include ':snappydata-store-shared' include ':snappydata-store-prebuild' diff --git a/tests/core/build.gradle b/tests/core/build.gradle index afe4b4ac4..15223cb84 100644 --- a/tests/core/build.gradle +++ b/tests/core/build.gradle @@ -30,9 +30,9 @@ idea { } dependencies { - provided project(subprojectBase + 'gemfire-jgroups') - provided project(subprojectBase + 'gemfire-core') - provided project(subprojectBase + 'gemfire-examples') + compileOnly project(subprojectBase + 'gemfire-jgroups') + compileOnly project(subprojectBase + 'gemfire-core') + compileOnly project(subprojectBase + 'gemfire-examples') compile project(subprojectBase + 'gemfire-junit') compile "junit:junit:${junitVersion}" @@ -47,7 +47,7 @@ dependencies { compile 'org.jmock:jmock-legacy:2.5.1' compile 'edu.umd.cs.mtc:multithreadedtc:1.01' compile 'cglib:cglib-nodep:2.1_3' - compile 'org.objenesis:objenesis:1.0' + compile 'org.objenesis:objenesis:2.6' compile 'net.spy:spymemcached:2.12.0' compile 'com.vmware:vijava:5.1' compile 'org.jdom:jdom:1.1.3' @@ -58,25 +58,35 @@ dependencies { compile "org.eclipse.jetty:jetty-util:${jettyVersion}" compile "org.eclipse.jetty:jetty-webapp:${jettyVersion}" - provided 'net.sourceforge.jline:jline:1.0.S2-B' - provided files("${System.getProperty('java.home')}/../lib/tools.jar") - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" - provided "org.springframework:spring-core:${springVersion}" - provided 'org.springframework.shell:spring-shell:1.0.0.RELEASE' + compile "org.apache.hbase:hbase-common:${hbaseVersion}" + compile "org.apache.hbase:hbase-protocol:${hbaseVersion}" + compile "org.apache.hbase:hbase-client:${hbaseVersion}" + compile "org.apache.hbase:hbase-server:${hbaseVersion}" + compile "com.sun.jersey:jersey-core:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-server:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" + compile "org.mortbay.jetty:jetty:${hadoopJettyVersion}" + compile "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" + compile 'org.cloudera.htrace:htrace-core:2.05' - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}:tests" - provided 
"org.apache.hadoop:hadoop-hdfs:${hadoopVersion}:tests" + compileOnly 'net.sourceforge.jline:jline:1.0.S2-B' + compileOnly files("${System.getProperty('java.home')}/../lib/tools.jar") + compileOnly "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compileOnly "com.google.protobuf:protobuf-java:${protobufVersion}" + compileOnly "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compileOnly "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly "org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "com.google.code.findbugs:jsr305:${jsr305Version}" + compileOnly "org.springframework:spring-core:${springVersion}" + compileOnly 'org.springframework.shell:spring-shell:1.0.0.RELEASE' + + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}:tests" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}:tests" } archivesBaseName = 'gemfire-hydra-tests' diff --git a/tests/sql/build.gradle b/tests/sql/build.gradle index 784a3f599..28b4085be 100644 --- a/tests/sql/build.gradle +++ b/tests/sql/build.gradle @@ -16,7 +16,7 @@ */ plugins { - id 'com.github.johnrengelman.shadow' version '1.2.3' + id 'com.github.johnrengelman.shadow' version '2.0.1' } sourceSets.main.java { @@ -40,40 +40,50 @@ ext { } dependencies { - provided project(subprojectBase + 'gemfire-core') + compileOnly project(subprojectBase + 'gemfire-core') compile project(subprojectBase + 'gemfire-tests') compile project(subprojectBase + 'snappydata-store-client') - provided project(subprojectBase + 'snappydata-store-tools') - provided project(subprojectBase + 'snappydata-store-core') + compileOnly project(subprojectBase + 'snappydata-store-tools') + compileOnly project(subprojectBase + 'snappydata-store-core') - provided "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-auth:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-common:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" - provided "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" - provided "com.google.protobuf:protobuf-java:${protobufVersion}" - provided "com.sun.jersey:jersey-core:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-server:${sunJerseyVersion}" - provided "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" - provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}" - provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" - provided "com.google.code.findbugs:jsr305:${jsr305Version}" + compileOnly "org.apache.hadoop:hadoop-annotations:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-auth:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-common:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-hdfs:${hadoopVersion}" + compileOnly "org.apache.hadoop:hadoop-mapreduce-client-core:${hadoopVersion}" + compileOnly "com.google.protobuf:protobuf-java:${protobufVersion}" + compileOnly "org.glassfish.jersey.core:jersey-server:${jerseyVersion}" + compileOnly "org.glassfish.jersey.containers:jersey-container-servlet-core:${jerseyVersion}" + compileOnly "org.eclipse.jetty:jetty-server:${jettyVersion}" + compileOnly 
"org.eclipse.jetty:jetty-util:${jettyVersion}" + compileOnly "com.google.code.findbugs:jsr305:${jsr305Version}" - provided "org.springframework:spring-aop:${springVersion}" - provided "org.springframework:spring-beans:${springVersion}" - provided "org.springframework:spring-context:${springVersion}" - provided "org.springframework:spring-context-support:${springVersion}" - provided "org.springframework:spring-core:${springVersion}" - provided "org.springframework:spring-expression:${springVersion}" - provided "org.springframework:spring-web:${springVersion}" - provided "org.springframework:spring-webmvc:${springVersion}" - provided "org.springframework:spring-tx:${springVersion}" + compileOnly "org.springframework:spring-aop:${springVersion}" + compileOnly "org.springframework:spring-beans:${springVersion}" + compileOnly "org.springframework:spring-context:${springVersion}" + compileOnly "org.springframework:spring-context-support:${springVersion}" + compileOnly "org.springframework:spring-core:${springVersion}" + compileOnly "org.springframework:spring-expression:${springVersion}" + compileOnly "org.springframework:spring-web:${springVersion}" + compileOnly "org.springframework:spring-webmvc:${springVersion}" + compileOnly "org.springframework:spring-tx:${springVersion}" + + compile "org.apache.hbase:hbase-common:${hbaseVersion}" + compile "org.apache.hbase:hbase-protocol:${hbaseVersion}" + compile "org.apache.hbase:hbase-client:${hbaseVersion}" + compile "org.apache.hbase:hbase-server:${hbaseVersion}" + compile "com.sun.jersey:jersey-core:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-server:${sunJerseyVersion}" + compile "com.sun.jersey:jersey-servlet:${sunJerseyVersion}" + compile "org.mortbay.jetty:jetty:${hadoopJettyVersion}" + compile "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}" + compile 'org.cloudera.htrace:htrace-core:2.05' compile 'com.googlecode.json-simple:json-simple:1.1.1' compile 'org.hibernate.javax.persistence:hibernate-jpa-2.0-api:1.0.1.Final' compile 'javax.ejb:javax.ejb-api:3.2' - compile 'org.apache.tomcat:tomcat-juli:8.5.9' - compile 'org.apache.tomcat:tomcat-jdbc:8.5.9' + compile 'org.apache.tomcat:tomcat-juli:8.5.21' + compile 'org.apache.tomcat:tomcat-jdbc:8.5.21' compile 'com.mchange:c3p0:0.9.5.2' compile "org.apache.derby:derbytools:${derbyVersion}" runtime "org.apache.derby:derby:${derbyVersion}"