diff --git a/.gitignore b/.gitignore index a107f43f..f6c0392d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,23 +1,24 @@ # ignore compiled byte code target # ignore output files from testing output* # ignore standard Eclipse files .project .classpath .settings .checkstyle # ignore standard IntelliJ files .idea/ *.iml +*.ipr *.iws # ignore standard Vim and Emacs temp files *.swp *~ # ignore standard Mac OS X files/dirs .DS_Store diff --git a/README.md b/README.md index 82a6a1b0..b3c2642e 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,77 @@ Yahoo! Cloud System Benchmark (YCSB) ==================================== [![Build Status](https://travis-ci.org/brianfrankcooper/YCSB.png?branch=master)](https://travis-ci.org/brianfrankcooper/YCSB) Links ----- http://wiki.github.com/brianfrankcooper/YCSB/ https://labs.yahoo.com/news/yahoo-cloud-serving-benchmark/ ycsb-users@yahoogroups.com Getting Started --------------- 1. Download the [latest release of YCSB](https://github.com/brianfrankcooper/YCSB/releases/latest): ```sh - curl -O --location https://github.com/brianfrankcooper/YCSB/releases/download/0.9.0/ycsb-0.9.0.tar.gz - tar xfvz ycsb-0.9.0.tar.gz - cd ycsb-0.9.0 + curl -O --location https://github.com/brianfrankcooper/YCSB/releases/download/0.11.0/ycsb-0.11.0.tar.gz + tar xfvz ycsb-0.11.0.tar.gz + cd ycsb-0.11.0 ``` 2. Set up a database to benchmark. There is a README file under each binding directory. 3. Run YCSB command. On Linux: ```sh bin/ycsb.sh load basic -P workloads/workloada bin/ycsb.sh run basic -P workloads/workloada ``` On Windows: ```bat bin/ycsb.bat load basic -P workloads\workloada bin/ycsb.bat run basic -P workloads\workloada ``` Running the `ycsb` command without any argument will print the usage. See https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload for a detailed documentation on how to run a workload. See https://github.com/brianfrankcooper/YCSB/wiki/Core-Properties for the list of available workload properties. 
Building from source -------------------- YCSB requires the use of Maven 3; if you use Maven 2, you may see [errors such as these](https://github.com/brianfrankcooper/YCSB/issues/406). To build the full distribution, with all database bindings: mvn clean package To build a single database binding: mvn -pl com.yahoo.ycsb:mongodb-binding -am clean package diff --git a/accumulo/pom.xml b/accumulo/pom.xml index cba28b8a..eb098f88 100644 --- a/accumulo/pom.xml +++ b/accumulo/pom.xml @@ -1,83 +1,83 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent accumulo-binding Accumulo DB Binding 2.2.0 org.apache.accumulo accumulo-core ${accumulo.version} org.apache.hadoop hadoop-common ${hadoop.version} com.yahoo.ycsb core ${project.version} provided junit junit 4.12 test org.apache.accumulo accumulo-minicluster ${accumulo.version} test org.slf4j slf4j-api 1.7.13 ../workloads workloads src/test/resources diff --git a/aerospike/pom.xml b/aerospike/pom.xml index 684c127a..bd0c77c3 100644 --- a/aerospike/pom.xml +++ b/aerospike/pom.xml @@ -1,45 +1,45 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent aerospike-binding Aerospike DB Binding jar com.aerospike aerospike-client ${aerospike.version} com.yahoo.ycsb core ${project.version} provided diff --git a/asynchbase/pom.xml b/asynchbase/pom.xml index 4e083ec6..2092a88b 100644 --- a/asynchbase/pom.xml +++ b/asynchbase/pom.xml @@ -1,105 +1,105 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ asynchbase-binding AsyncHBase Client Binding for Apache HBase org.hbase asynchbase ${asynchbase.version} com.yahoo.ycsb core ${project.version} provided org.apache.zookeeper zookeeper 3.4.5 log4j log4j org.slf4j slf4j-log4j12 jline jline junit junit org.jboss.netty netty junit junit 4.12 test org.apache.hbase hbase-testing-util ${hbase10.version} test org.apache.hbase hbase-client ${hbase10.version} test log4j log4j 1.2.17 
test org.slf4j log4j-over-slf4j 1.7.7 test \ No newline at end of file diff --git a/bin/bindings.properties b/bin/bindings.properties index a6946d86..da5dac7a 100644 --- a/bin/bindings.properties +++ b/bin/bindings.properties @@ -1,64 +1,63 @@ # # Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. See accompanying # LICENSE file. # #DATABASE BINDINGS # # Available bindings should be listed here in the form of # name:class # # - the name must start in column 0. # - the name is also the directory where the class can be found. # - if the directory contains multiple versions with different classes, # use a dash with the version. (e.g. 
cassandra-7, cassandra-cql) # accumulo:com.yahoo.ycsb.db.accumulo.AccumuloClient aerospike:com.yahoo.ycsb.db.AerospikeClient asynchbase:com.yahoo.ycsb.db.AsyncHBaseClient +arangodb:com.yahoo.ycsb.db.ArangoDBClient basic:com.yahoo.ycsb.BasicDB -cassandra-7:com.yahoo.ycsb.db.CassandraClient7 -cassandra-8:com.yahoo.ycsb.db.CassandraClient8 -cassandra-10:com.yahoo.ycsb.db.CassandraClient10 cassandra-cql:com.yahoo.ycsb.db.CassandraCQLClient cassandra2-cql:com.yahoo.ycsb.db.CassandraCQLClient couchbase:com.yahoo.ycsb.db.CouchbaseClient couchbase2:com.yahoo.ycsb.db.couchbase2.Couchbase2Client dynamodb:com.yahoo.ycsb.db.DynamoDBClient elasticsearch:com.yahoo.ycsb.db.ElasticsearchClient geode:com.yahoo.ycsb.db.GeodeClient googlebigtable:com.yahoo.ycsb.db.GoogleBigtableClient googledatastore:com.yahoo.ycsb.db.GoogleDatastoreClient hbase094:com.yahoo.ycsb.db.HBaseClient hbase098:com.yahoo.ycsb.db.HBaseClient hbase10:com.yahoo.ycsb.db.HBaseClient10 hypertable:com.yahoo.ycsb.db.HypertableClient infinispan-cs:com.yahoo.ycsb.db.InfinispanRemoteClient infinispan:com.yahoo.ycsb.db.InfinispanClient jdbc:com.yahoo.ycsb.db.JdbcDBClient kudu:com.yahoo.ycsb.db.KuduYCSBClient mapkeeper:com.yahoo.ycsb.db.MapKeeperClient memcached:com.yahoo.ycsb.db.MemcachedClient mongodb:com.yahoo.ycsb.db.MongoDbClient mongodb-async:com.yahoo.ycsb.db.AsyncMongoDbClient nosqldb:com.yahoo.ycsb.db.NoSqlDbClient orientdb:com.yahoo.ycsb.db.OrientDBClient +rados:com.yahoo.ycsb.db.RadosClient redis:com.yahoo.ycsb.db.RedisClient riak:com.yahoo.ycsb.db.riak.RiakKVClient s3:com.yahoo.ycsb.db.S3Client solr:com.yahoo.ycsb.db.SolrClient tarantool:com.yahoo.ycsb.db.TarantoolClient voldemort:com.yahoo.ycsb.db.VoldemortClient diff --git a/bin/ycsb b/bin/ycsb index 4db2926e..470fdd8d 100755 --- a/bin/ycsb +++ b/bin/ycsb @@ -1,295 +1,297 @@ #!/usr/bin/env python # # Copyright (c) 2012 - 2015 YCSB contributors. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. See accompanying # LICENSE file. # import errno import fnmatch import io import os import shlex import sys import subprocess try: mod = __import__('argparse') import argparse except ImportError: print >> sys.stderr, '[ERROR] argparse not found. Try installing it via "pip".' exit(1) BASE_URL = "https://github.com/brianfrankcooper/YCSB/tree/master/" COMMANDS = { "shell" : { "command" : "", "description" : "Interactive mode", "main" : "com.yahoo.ycsb.CommandLine", }, "load" : { "command" : "-load", "description" : "Execute the load phase", "main" : "com.yahoo.ycsb.Client", }, "run" : { "command" : "-t", "description" : "Execute the transaction phase", "main" : "com.yahoo.ycsb.Client", }, } DATABASES = { "accumulo" : "com.yahoo.ycsb.db.accumulo.AccumuloClient", "aerospike" : "com.yahoo.ycsb.db.AerospikeClient", + "arangodb" : "com.yahoo.ycsb.db.ArangoDBClient", "asynchbase" : "com.yahoo.ycsb.db.AsyncHBaseClient", "basic" : "com.yahoo.ycsb.BasicDB", - "cassandra-7" : "com.yahoo.ycsb.db.CassandraClient7", - "cassandra-8" : "com.yahoo.ycsb.db.CassandraClient8", - "cassandra-10" : "com.yahoo.ycsb.db.CassandraClient10", "cassandra-cql": "com.yahoo.ycsb.db.CassandraCQLClient", "cassandra2-cql": "com.yahoo.ycsb.db.CassandraCQLClient", "couchbase" : "com.yahoo.ycsb.db.CouchbaseClient", "couchbase2" : "com.yahoo.ycsb.db.couchbase2.Couchbase2Client", "dynamodb" : "com.yahoo.ycsb.db.DynamoDBClient", "elasticsearch": 
"com.yahoo.ycsb.db.ElasticsearchClient", "geode" : "com.yahoo.ycsb.db.GeodeClient", "googlebigtable" : "com.yahoo.ycsb.db.GoogleBigtableClient", "googledatastore" : "com.yahoo.ycsb.db.GoogleDatastoreClient", "hbase094" : "com.yahoo.ycsb.db.HBaseClient", "hbase098" : "com.yahoo.ycsb.db.HBaseClient", "hbase10" : "com.yahoo.ycsb.db.HBaseClient10", "hypertable" : "com.yahoo.ycsb.db.HypertableClient", "infinispan-cs": "com.yahoo.ycsb.db.InfinispanRemoteClient", "infinispan" : "com.yahoo.ycsb.db.InfinispanClient", "jdbc" : "com.yahoo.ycsb.db.JdbcDBClient", "kudu" : "com.yahoo.ycsb.db.KuduYCSBClient", "mapkeeper" : "com.yahoo.ycsb.db.MapKeeperClient", "memcached" : "com.yahoo.ycsb.db.MemcachedClient", "mongodb" : "com.yahoo.ycsb.db.MongoDbClient", "mongodb-async": "com.yahoo.ycsb.db.AsyncMongoDbClient", "nosqldb" : "com.yahoo.ycsb.db.NoSqlDbClient", "orientdb" : "com.yahoo.ycsb.db.OrientDBClient", "rados" : "com.yahoo.ycsb.db.RadosClient", "redis" : "com.yahoo.ycsb.db.RedisClient", "riak" : "com.yahoo.ycsb.db.riak.RiakKVClient", "s3" : "com.yahoo.ycsb.db.S3Client", "solr" : "com.yahoo.ycsb.db.SolrClient", "tarantool" : "com.yahoo.ycsb.db.TarantoolClient", "voldemort" : "com.yahoo.ycsb.db.VoldemortClient" } OPTIONS = { "-P file" : "Specify workload file", "-p key=value" : "Override workload property", "-s" : "Print status to stderr", "-target n" : "Target ops/sec (default: unthrottled)", "-threads n" : "Number of client threads (default: 1)", "-cp path" : "Additional Java classpath entries", "-jvm-args args" : "Additional arguments to the JVM", } def usage(): output = io.BytesIO() print >> output, "%s command database [options]" % sys.argv[0] print >> output, "\nCommands:" for command in sorted(COMMANDS.keys()): print >> output, " %s %s" % (command.ljust(14), COMMANDS[command]["description"]) print >> output, "\nDatabases:" for db in sorted(DATABASES.keys()): print >> output, " %s %s" % (db.ljust(14), BASE_URL + db.split("-")[0]) print >> output, "\nOptions:" for option in 
sorted(OPTIONS.keys()): print >> output, " %s %s" % (option.ljust(14), OPTIONS[option]) print >> output, """\nWorkload Files: There are various predefined workloads under workloads/ directory. See https://github.com/brianfrankcooper/YCSB/wiki/Core-Properties for the list of workload properties.""" return output.getvalue() # Python 2.6 doesn't have check_output. Add the method as it is in Python 2.7 # Based on https://github.com/python/cpython/blob/2.7/Lib/subprocess.py#L545 def check_output(*popenargs, **kwargs): r"""Run command with arguments and return its output as a byte string. If the exit code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and output in the output attribute. The arguments are the same as for the Popen constructor. Example: >>> check_output(["ls", "-l", "/dev/null"]) 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n' The stdout argument is not allowed as it is used internally. To capture standard error in the result, use stderr=STDOUT. >>> check_output(["/bin/sh", "-c", ... "ls -l non_existent_file ; exit 0"], ... 
stderr=STDOUT) 'ls: non_existent_file: No such file or directory\n' """ if 'stdout' in kwargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, unused_err = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] error = subprocess.CalledProcessError(retcode, cmd) error.output = output raise error return output def debug(message): print >> sys.stderr, "[DEBUG] ", message def warn(message): print >> sys.stderr, "[WARN] ", message def error(message): print >> sys.stderr, "[ERROR] ", message def find_jars(dir, glob='*.jar'): jars = [] for (dirpath, dirnames, filenames) in os.walk(dir): for filename in fnmatch.filter(filenames, glob): jars.append(os.path.join(dirpath, filename)) return jars def get_ycsb_home(): dir = os.path.abspath(os.path.dirname(sys.argv[0])) while "LICENSE.txt" not in os.listdir(dir): dir = os.path.join(dir, os.path.pardir) return os.path.abspath(dir) def is_distribution(): # If there's a top level pom, we're a source checkout. otherwise a dist artifact return "pom.xml" not in os.listdir(get_ycsb_home()) # Run the maven dependency plugin to get the local jar paths. # presumes maven can run, so should only be run on source checkouts # will invoke the 'package' goal for the given binding in order to resolve intra-project deps # presumes maven properly handles system-specific path separators # Given module is full module name eg. 
'core' or 'couchbase-binding' def get_classpath_from_maven(module): try: debug("Running 'mvn -pl com.yahoo.ycsb:" + module + " -am package -DskipTests " "dependency:build-classpath -DincludeScope=compile -Dmdep.outputFilterFile=true'") mvn_output = check_output(["mvn", "-pl", "com.yahoo.ycsb:" + module, "-am", "package", "-DskipTests", "dependency:build-classpath", "-DincludeScope=compile", "-Dmdep.outputFilterFile=true"]) # the above outputs a "classpath=/path/tojar:/path/to/other/jar" for each module # the last module will be the datastore binding line = [x for x in mvn_output.splitlines() if x.startswith("classpath=")][-1:] return line[0][len("classpath="):] except subprocess.CalledProcessError, err: error("Attempting to generate a classpath from Maven failed " "with return code '" + str(err.returncode) + "'. The output from " "Maven follows, try running " "'mvn -DskipTests package dependency:build=classpath' on your " "own and correct errors." + os.linesep + os.linesep + "mvn output:" + os.linesep + err.output) sys.exit(err.returncode) def main(): p = argparse.ArgumentParser( usage=usage(), formatter_class=argparse.RawDescriptionHelpFormatter) p.add_argument('-cp', dest='classpath', help="""Additional classpath entries, e.g. '-cp /tmp/hbase-1.0.1.1/conf'. Will be prepended to the YCSB classpath.""") p.add_argument("-jvm-args", default=[], type=shlex.split, help="""Additional arguments to pass to 'java', e.g. '-Xmx4g'""") p.add_argument("command", choices=sorted(COMMANDS), help="""Command to run.""") p.add_argument("database", choices=sorted(DATABASES), help="""Database to test.""") args, remaining = p.parse_known_args() ycsb_home = get_ycsb_home() # Use JAVA_HOME to find java binary if set, otherwise just use PATH. 
java = "java" java_home = os.getenv("JAVA_HOME") if java_home: java = os.path.join(java_home, "bin", "java") db_classname = DATABASES[args.database] command = COMMANDS[args.command]["command"] main_classname = COMMANDS[args.command]["main"] # Classpath set up binding = args.database.split("-")[0] - # Deprecation message for the entire cassandra-binding - if binding == "cassandra": - warn("The 'cassandra-7', 'cassandra-8', 'cassandra-10', and " - "cassandra-cql' clients are deprecated. If you are using " - "Cassandra 2.X try using the 'cassandra2-cql' client instead.") + if binding == "cassandra2": + warn("The 'cassandra2-cql' client has been deprecated. It has been " + "renamed to simply 'cassandra-cql'. This alias will be removed" + " in the next YCSB release.") + binding = "cassandra" + + if binding == "couchbase": + warn("The 'couchbase' client has been deprecated. If you are using " + "Couchbase 4.0+ try using the 'couchbase2' client instead.") if is_distribution(): db_dir = os.path.join(ycsb_home, binding + "-binding") # include top-level conf for when we're a binding-specific artifact. # If we add top-level conf to the general artifact, starting here # will allow binding-specific conf to override (because it's prepended) cp = [os.path.join(ycsb_home, "conf")] cp.extend(find_jars(os.path.join(ycsb_home, "lib"))) cp.extend(find_jars(os.path.join(db_dir, "lib"))) else: warn("Running against a source checkout. In order to get our runtime " "dependencies we'll have to invoke Maven. 
Depending on the state " "of your system, this may take ~30-45 seconds") db_location = "core" if binding == "basic" else binding project = "core" if binding == "basic" else binding + "-binding" db_dir = os.path.join(ycsb_home, db_location) # goes first so we can rely on side-effect of package maven_says = get_classpath_from_maven(project) # TODO when we have a version property, skip the glob cp = find_jars(os.path.join(db_dir, "target"), project + "*.jar") # alredy in jar:jar:jar form cp.append(maven_says) cp.insert(0, os.path.join(db_dir, "conf")) classpath = os.pathsep.join(cp) if args.classpath: classpath = os.pathsep.join([args.classpath, classpath]) ycsb_command = ([java] + args.jvm_args + ["-cp", classpath, main_classname, "-db", db_classname] + remaining) if command: ycsb_command.append(command) print >> sys.stderr, " ".join(ycsb_command) try: return subprocess.call(ycsb_command) except OSError as e: if e.errno == errno.ENOENT: error('Command failed. Is java installed and on your PATH?') return 1 else: raise if __name__ == '__main__': sys.exit(main()) diff --git a/bin/ycsb.bat b/bin/ycsb.bat index 231d04d6..f61a39d5 100644 --- a/bin/ycsb.bat +++ b/bin/ycsb.bat @@ -1,183 +1,208 @@ @REM @REM Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved. @REM @REM Licensed under the Apache License, Version 2.0 (the "License"); you @REM may not use this file except in compliance with the License. You @REM may obtain a copy of the License at @REM @REM http://www.apache.org/licenses/LICENSE-2.0 @REM @REM Unless required by applicable law or agreed to in writing, software @REM distributed under the License is distributed on an "AS IS" BASIS, @REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or @REM implied. See the License for the specific language governing @REM permissions and limitations under the License. See accompanying @REM LICENSE file. 
@REM @REM ----------------------------------------------------------------------- @REM Control Script for YCSB @REM @REM Environment Variable Prerequisites @REM @REM Do not set the variables in this script. Instead put them into a script @REM setenv.sh in YCSB_HOME/bin to keep your customizations separate. @REM @REM YCSB_HOME (Optional) YCSB installation directory. If not set @REM this script will use the parent directory of where this @REM script is run from. @REM @REM JAVA_HOME (Required) Must point at your Java Development Kit @REM or Java Runtime Environment installation. @REM @REM JAVA_OPTS (Optional) Java runtime options used when any command @REM is executed. @REM @REM WARNING!!! YCSB home must be located in a directory path that doesn't @REM contain spaces. @REM @ECHO OFF SETLOCAL ENABLEDELAYEDEXPANSION @REM Only set YCSB_HOME if not already set PUSHD %~dp0.. IF NOT DEFINED YCSB_HOME SET YCSB_HOME=%CD% POPD @REM Ensure that any extra CLASSPATH variables are set via setenv.bat SET CLASSPATH= @REM Pull in customization options if exist "%YCSB_HOME%\bin\setenv.bat" call "%YCSB_HOME%\bin\setenv.bat" @REM Check if we have a usable JDK IF "%JAVA_HOME%." == "." GOTO noJavaHome -if not exist "%JAVA_HOME%\bin\java.exe" goto noJavaHome +IF NOT EXIST "%JAVA_HOME%\bin\java.exe" GOTO noJavaHome GOTO okJava :noJavaHome ECHO The JAVA_HOME environment variable is not defined correctly. GOTO exit :okJava @REM Determine YCSB command argument IF NOT "load" == "%1" GOTO noload SET YCSB_COMMAND=-load SET YCSB_CLASS=com.yahoo.ycsb.Client GOTO gotCommand :noload IF NOT "run" == "%1" GOTO noRun SET YCSB_COMMAND=-t SET YCSB_CLASS=com.yahoo.ycsb.Client GOTO gotCommand :noRun IF NOT "shell" == "%1" GOTO noShell SET YCSB_COMMAND= SET YCSB_CLASS=com.yahoo.ycsb.CommandLine GOTO gotCommand :noShell ECHO [ERROR] Found unknown command '%1' ECHO [ERROR] Expected one of 'load', 'run', or 'shell'. Exiting. 
GOTO exit :gotCommand @REM Find binding information FOR /F "delims=" %%G in ( 'FINDSTR /B "%2:" %YCSB_HOME%\bin\bindings.properties' ) DO SET "BINDING_LINE=%%G" IF NOT "%BINDING_LINE%." == "." GOTO gotBindingLine ECHO [ERROR] The specified binding '%2' was not found. Exiting. GOTO exit :gotBindingLine @REM Pull out binding name and class FOR /F "tokens=1-2 delims=:" %%G IN ("%BINDING_LINE%") DO ( SET BINDING_NAME=%%G SET BINDING_CLASS=%%H ) @REM Some bindings have multiple versions that are managed in the same @REM directory. @REM They are noted with a '-' after the binding name. @REM (e.g. cassandra-7 & cassandra-8) FOR /F "tokens=1 delims=-" %%G IN ("%BINDING_NAME%") DO ( SET BINDING_DIR=%%G ) @REM The 'basic' binding is core functionality IF NOT "%BINDING_NAME%" == "basic" GOTO noBasic SET BINDING_DIR=core :noBasic @REM Add Top level conf to classpath IF "%CLASSPATH%." == "." GOTO emptyClasspath SET CLASSPATH=%CLASSPATH%;%YCSB_HOME%\conf GOTO confAdded :emptyClasspath SET CLASSPATH=%YCSB_HOME%\conf :confAdded +@REM Cassandra2 deprecation message +IF NOT "%BINDING_DIR%" == "cassandra2" GOTO notAliasCassandra +echo [WARN] The 'cassandra2-cql' client has been deprecated. It has been renamed to simply 'cassandra-cql'. This alias will be removed in the next YCSB release. 
+SET BINDING_DIR=cassandra +:notAliasCassandra + @REM Build classpath according to source checkout or release distribution -IF EXIST "%YCSB_HOME%\pom.xml" GOTO gotRelease +IF EXIST "%YCSB_HOME%\pom.xml" GOTO gotSource @REM Core libraries FOR %%F IN (%YCSB_HOME%\lib\*.jar) DO ( SET CLASSPATH=!CLASSPATH!;%%F% ) @REM Database conf dir IF NOT EXIST "%YCSB_HOME%\%BINDING_DIR%-binding\conf" GOTO noBindingConf set CLASSPATH=%CLASSPATH%;%YCSB_HOME%\%BINDING_DIR%-binding\conf :noBindingConf @REM Database libraries FOR %%F IN (%YCSB_HOME%\%BINDING_DIR%-binding\lib\*.jar) DO ( SET CLASSPATH=!CLASSPATH!;%%F% ) GOTO classpathComplete -:gotRelease +:gotSource +@REM Check for some basic libraries to see if the source has been built. +IF EXIST "%YCSB_HOME%\%BINDING_DIR%\target\*.jar" GOTO gotJars + +@REM Call mvn to build source checkout. +IF "%BINDING_NAME%" == "basic" GOTO buildCore +SET MVN_PROJECT=%BINDING_DIR%-binding +goto gotMvnProject +:buildCore +SET MVN_PROJECT=core +:gotMvnProject + +ECHO [WARN] YCSB libraries not found. Attempting to build... +CALL mvn -pl com.yahoo.ycsb:%MVN_PROJECT% -am package -DskipTests +IF %ERRORLEVEL% NEQ 0 ( + ECHO [ERROR] Error trying to build project. Exiting. + GOTO exit +) + +:gotJars @REM Core libraries FOR %%F IN (%YCSB_HOME%\core\target\*.jar) DO ( SET CLASSPATH=!CLASSPATH!;%%F% ) @REM Database conf (need to find because location is not consistent) FOR /D /R %YCSB_HOME%\%BINDING_DIR% %%F IN (*) DO ( IF "%%~nxF" == "conf" SET CLASSPATH=!CLASSPATH!;%%F% ) @REM Database libraries FOR %%F IN (%YCSB_HOME%\%BINDING_DIR%\target\*.jar) DO ( SET CLASSPATH=!CLASSPATH!;%%F% ) @REM Database dependency libraries FOR %%F IN (%YCSB_HOME%\%BINDING_DIR%\target\dependency\*.jar) DO ( SET CLASSPATH=!CLASSPATH!;%%F% ) :classpathComplete -@REM Cassandra deprecation message -IF NOT "%BINDING_DIR%" == "cassandra" GOTO notOldCassandra -echo [WARN] The 'cassandra-7', 'cassandra-8', 'cassandra-10', and cassandra-cql' clients are deprecated. 
If you are using Cassandra 2.X try using the 'cassandra2-cql' client instead. -:notOldCassandra +@REM Couchbase deprecation message +IF NOT "%BINDING_DIR%" == "couchbase" GOTO notOldCouchbase +echo [WARN] The 'couchbase' client is deprecated. If you are using Couchbase 4.0+ try using the 'couchbase2' client instead. +:notOldCouchbase @REM Get the rest of the arguments, skipping the first 2 FOR /F "tokens=2*" %%G IN ("%*") DO ( SET YCSB_ARGS=%%H ) @REM Run YCSB @ECHO ON "%JAVA_HOME%\bin\java.exe" %JAVA_OPTS% -classpath "%CLASSPATH%" %YCSB_CLASS% %YCSB_COMMAND% -db %BINDING_CLASS% %YCSB_ARGS% @ECHO OFF GOTO end :exit EXIT /B 1; :end diff --git a/bin/ycsb.sh b/bin/ycsb.sh index efdb58bd..d62c22a4 100755 --- a/bin/ycsb.sh +++ b/bin/ycsb.sh @@ -1,236 +1,243 @@ #!/bin/sh # # Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you # may not use this file except in compliance with the License. You # may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. See accompanying # LICENSE file. # # ----------------------------------------------------------------------------- # Control Script for YCSB # # Environment Variable Prerequisites # # Do not set the variables in this script. Instead put them into a script # setenv.sh in YCSB_HOME/bin to keep your customizations separate. # # YCSB_HOME (Optional) YCSB installation directory. If not set # this script will use the parent directory of where this # script is run from. # # JAVA_HOME (Optional) Must point at your Java Development Kit # installation. 
If empty, this script tries use the # available java executable. # # JAVA_OPTS (Optional) Java runtime options used when any command # is executed. # # WARNING!!! YCSB home must be located in a directory path that doesn't # contain spaces. # # www.shellcheck.net was used to validate this script # Cygwin support CYGWIN=false case "$(uname)" in CYGWIN*) CYGWIN=true;; esac # Get script path SCRIPT_DIR=$(dirname "$0" 2>/dev/null) # Only set YCSB_HOME if not already set [ -z "$YCSB_HOME" ] && YCSB_HOME=$(cd "$SCRIPT_DIR/.." || exit; pwd) # Ensure that any extra CLASSPATH variables are set via setenv.sh CLASSPATH= # Pull in customization options if [ -r "$YCSB_HOME/bin/setenv.sh" ]; then # Shellcheck wants a source, but this directive only runs if available # So, tell shellcheck to ignore # shellcheck source=/dev/null . "$YCSB_HOME/bin/setenv.sh" fi # Attempt to find the available JAVA, if JAVA_HOME not set if [ -z "$JAVA_HOME" ]; then JAVA_PATH=$(which java 2>/dev/null) if [ "x$JAVA_PATH" != "x" ]; then JAVA_HOME=$(dirname "$(dirname "$JAVA_PATH" 2>/dev/null)") fi fi # If JAVA_HOME still not set, error if [ -z "$JAVA_HOME" ]; then echo "[ERROR] Java executable not found. Exiting." exit 1; fi # Determine YCSB command argument if [ "load" = "$1" ] ; then YCSB_COMMAND=-load YCSB_CLASS=com.yahoo.ycsb.Client elif [ "run" = "$1" ] ; then YCSB_COMMAND=-t YCSB_CLASS=com.yahoo.ycsb.Client elif [ "shell" = "$1" ] ; then YCSB_COMMAND= YCSB_CLASS=com.yahoo.ycsb.CommandLine else echo "[ERROR] Found unknown command '$1'" echo "[ERROR] Expected one of 'load', 'run', or 'shell'. Exiting." exit 1; fi # Find binding information BINDING_LINE=$(grep "^$2:" "$YCSB_HOME/bin/bindings.properties" -m 1) if [ -z "$BINDING_LINE" ] ; then echo "[ERROR] The specified binding '$2' was not found. Exiting." 
exit 1; fi # Get binding name and class BINDING_NAME=$(echo "$BINDING_LINE" | cut -d':' -f1) BINDING_CLASS=$(echo "$BINDING_LINE" | cut -d':' -f2) # Some bindings have multiple versions that are managed in the same directory. # They are noted with a '-' after the binding name. # (e.g. cassandra-7 & cassandra-8) BINDING_DIR=$(echo "$BINDING_NAME" | cut -d'-' -f1) # The 'basic' binding is core functionality if [ "$BINDING_NAME" = "basic" ] ; then BINDING_DIR=core fi # For Cygwin, ensure paths are in UNIX format before anything is touched if $CYGWIN; then [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --unix "$CLASSPATH") fi # Check if source checkout, or release distribution DISTRIBUTION=true if [ -r "$YCSB_HOME/pom.xml" ]; then DISTRIBUTION=false; fi # Add Top level conf to classpath if [ -z "$CLASSPATH" ] ; then CLASSPATH="$YCSB_HOME/conf" else CLASSPATH="$CLASSPATH:$YCSB_HOME/conf" fi +# Cassandra2 deprecation message +if [ "${BINDING_DIR}" = "cassandra2" ] ; then + echo "[WARN] The 'cassandra2-cql' client has been deprecated. It has been \ +renamed to simply 'cassandra-cql'. This alias will be removed in the next \ +YCSB release." + BINDING_DIR="cassandra" +fi + # Build classpath # The "if" check after the "for" is because glob may just return the pattern # when no files are found. The "if" makes sure the file is really there. if $DISTRIBUTION; then # Core libraries for f in "$YCSB_HOME"/lib/*.jar ; do if [ -r "$f" ] ; then CLASSPATH="$CLASSPATH:$f" fi done # Database conf dir if [ -r "$YCSB_HOME"/"$BINDING_DIR"-binding/conf ] ; then CLASSPATH="$CLASSPATH:$YCSB_HOME/$BINDING_DIR-binding/conf" fi # Database libraries for f in "$YCSB_HOME"/"$BINDING_DIR"-binding/lib/*.jar ; do if [ -r "$f" ] ; then CLASSPATH="$CLASSPATH:$f" fi done # Source checkout else # Check for some basic libraries to see if the source has been built. 
for f in "$YCSB_HOME"/"$BINDING_DIR"/target/*.jar ; do # Call mvn to build source checkout. if [ ! -e "$f" ] ; then if [ "$BINDING_NAME" = "basic" ] ; then MVN_PROJECT=core else MVN_PROJECT="$BINDING_DIR"-binding fi echo "[WARN] YCSB libraries not found. Attempting to build..." mvn -pl com.yahoo.ycsb:"$MVN_PROJECT" -am package -DskipTests if [ "$?" -ne 0 ] ; then echo "[ERROR] Error trying to build project. Exiting." exit 1; fi fi done # Core libraries for f in "$YCSB_HOME"/core/target/*.jar ; do if [ -r "$f" ] ; then CLASSPATH="$CLASSPATH:$f" fi done # Database conf (need to find because location is not consistent) CLASSPATH_CONF=$(find "$YCSB_HOME"/$BINDING_DIR -name "conf" | while IFS="" read -r file; do echo ":$file"; done) if [ "x$CLASSPATH_CONF" != "x" ]; then CLASSPATH="$CLASSPATH$CLASSPATH_CONF" fi # Database libraries for f in "$YCSB_HOME"/"$BINDING_DIR"/target/*.jar ; do if [ -r "$f" ] ; then CLASSPATH="$CLASSPATH:$f" fi done # Database dependency libraries for f in "$YCSB_HOME"/"$BINDING_DIR"/target/dependency/*.jar ; do if [ -r "$f" ] ; then CLASSPATH="$CLASSPATH:$f" fi done fi -# Cassandra deprecation message -if [ "$BINDING_DIR" = "cassandra" ] ; then - echo "[WARN] The 'cassandra-7', 'cassandra-8', 'cassandra-10', and \ -cassandra-cql' clients are deprecated. If you are using \ -Cassandra 2.X try using the 'cassandra2-cql' client instead." +# Couchbase deprecation message +if [ "${BINDING_DIR}" = "couchbase" ] ; then + echo "[WARN] The 'couchbase' client is deprecated. If you are using \ +Couchbase 4.0+ try using the 'couchbase2' client instead." 
fi # For Cygwin, switch paths to Windows format before running java if $CYGWIN; then [ -n "$JAVA_HOME" ] && JAVA_HOME=$(cygpath --unix "$JAVA_HOME") [ -n "$CLASSPATH" ] && CLASSPATH=$(cygpath --path --windows "$CLASSPATH") fi # Get the rest of the arguments YCSB_ARGS=$(echo "$@" | cut -d' ' -f3-) # About to run YCSB echo "$JAVA_HOME/bin/java $JAVA_OPTS -classpath $CLASSPATH $YCSB_CLASS $YCSB_COMMAND -db $BINDING_CLASS $YCSB_ARGS" # Run YCSB # Shellcheck reports the following line as needing double quotes to prevent # globbing and word splitting. However, word splitting is the desired effect # here. So, the shellcheck error is disabled for this line. # shellcheck disable=SC2086 "$JAVA_HOME/bin/java" $JAVA_OPTS -classpath "$CLASSPATH" $YCSB_CLASS $YCSB_COMMAND -db $BINDING_CLASS $YCSB_ARGS diff --git a/binding-parent/datastore-specific-descriptor/pom.xml b/binding-parent/datastore-specific-descriptor/pom.xml index 0d94a4cf..f10a8bdd 100644 --- a/binding-parent/datastore-specific-descriptor/pom.xml +++ b/binding-parent/datastore-specific-descriptor/pom.xml @@ -1,44 +1,44 @@ 4.0.0 com.yahoo.ycsb root - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../../ datastore-specific-descriptor Per Datastore Binding descriptor jar This module contains the assembly descriptor used by the individual components to build binding-specific distributions. com.yahoo.ycsb core ${project.version} diff --git a/binding-parent/datastore-specific-descriptor/src/main/resources/assemblies/datastore-specific-assembly.xml b/binding-parent/datastore-specific-descriptor/src/main/resources/assemblies/datastore-specific-assembly.xml index 5783421c..2bf1900f 100644 --- a/binding-parent/datastore-specific-descriptor/src/main/resources/assemblies/datastore-specific-assembly.xml +++ b/binding-parent/datastore-specific-descriptor/src/main/resources/assemblies/datastore-specific-assembly.xml @@ -1,77 +1,77 @@ dist true ycsb-${artifactId}-${version} README.md .. 
0644 LICENSE.txt NOTICE.txt ../bin bin 0755 - ycsb + ycsb* ../workloads workloads 0644 src/main/conf conf 0644 lib com.yahoo.ycsb:core provided true lib *:jar:* *:sources diff --git a/binding-parent/pom.xml b/binding-parent/pom.xml index 5db39be1..a05a0ae3 100644 --- a/binding-parent/pom.xml +++ b/binding-parent/pom.xml @@ -1,132 +1,132 @@ 4.0.0 com.yahoo.ycsb root - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT binding-parent YCSB Datastore Binding Parent pom This module acts as the parent for new datastore bindings. It creates a datastore specific binary artifact. datastore-specific-descriptor org.apache.maven.plugins maven-assembly-plugin ${maven.assembly.version} com.yahoo.ycsb datastore-specific-descriptor ${project.version} datastore-specific-assembly ycsb-${project.artifactId}-${project.version} tar.gz false posix package single org.apache.maven.plugins maven-checkstyle-plugin validate ../checkstyle.xml org.apache.maven.plugins maven-dependency-plugin ${maven.dependency.version} org.apache.maven.plugins maven-dependency-plugin stage-dependencies package copy-dependencies runtime datastore-binding README.md org.apache.maven.plugins maven-assembly-plugin diff --git a/cassandra/README.md b/cassandra/README.md index bff09f4e..bee44c8d 100644 --- a/cassandra/README.md +++ b/cassandra/README.md @@ -1,72 +1,80 @@ -# THIS BINDING IS DEPRECATED ----------------------------- -Date of removal from YCSB: **March 2016** -Due to the low amount of use and support for older Cassandra lineages (0.X and 1.X), YCSB will not support clients for these versions either. +# Apache Cassandra 2.x CQL binding -For Cassandra 2.X use the ```cassandra2-cql``` client: https://github.com/brianfrankcooper/YCSB/tree/master/cassandra2. +Binding for [Apache Cassandra](http://cassandra.apache.org), using the CQL API +via the [DataStax +driver](http://docs.datastax.com/en/developer/java-driver/2.1/java-driver/whatsNew2.html). 
-# Cassandra (0.7, 0.8, 1.x) drivers for YCSB +To run against the (deprecated) Cassandra Thrift API, use the `cassandra-10` binding. -**For Cassandra 2 CQL support, use the `cassandra2-cql` binding. The Thrift drivers below are deprecated, and the CQL driver here does not support Cassandra 2.1+.** +## Creating a table for use with YCSB -There are three drivers in the Cassandra binding: +For keyspace `ycsb`, table `usertable`: -* `cassandra-7`: Cassandra 0.7 Thrift binding. -* `cassandra-8`: Cassandra 0.8 Thrift binding. -* `cassandra-10`: Cassandra 1.0+ Thrift binding. -* `cassandra-cql`: Cassandra CQL binding, for Cassandra 1.x to 2.0. See `cassandra2/README.md` for details on parameters. - -# `cassandra-10` - -## Creating a table - -Using `cassandra-cli`: - - create keyspace usertable with placement_strategy = 'org.apache.cassandra.locator.SimpleStrategy' and strategy_options = {replication_factor:1}; - - create column family data with column_type = 'Standard' and comparator = 'UTF8Type'; + cqlsh> create keyspace ycsb + WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor': 3 }; + cqlsh> USE ycsb; + cqlsh> create table usertable ( + y_id varchar primary key, + field0 varchar, + field1 varchar, + field2 varchar, + field3 varchar, + field4 varchar, + field5 varchar, + field6 varchar, + field7 varchar, + field8 varchar, + field9 varchar); **Note that `replication_factor` and consistency levels (below) will affect performance.** -## Configuration Parameters +## Cassandra Configuration Parameters - `hosts` (**required**) - Cassandra nodes to connect to. - No default. * `port` - - Thrift port for communicating with Cassandra cluster. - * Default is `9160`. + * CQL port for communicating with Cassandra cluster. + * Default is `9042`. + +- `cassandra.keyspace` + Keyspace name - must match the keyspace for the table created (see above). + See http://docs.datastax.com/en/cql/3.1/cql/cql_reference/create_keyspace_r.html for details. 
-- `cassandra.columnfamily` - - Column family name - must match the column family for the table created (see above). - - Default value is `data` + - Default value is `ycsb` - `cassandra.username` - `cassandra.password` - Optional user name and password for authentication. See http://docs.datastax.com/en/cassandra/2.0/cassandra/security/security_config_native_authenticate_t.html for details. * `cassandra.readconsistencylevel` -* `cassandra.scanconsistencylevel` * `cassandra.writeconsistencylevel` - - Default value is `ONE` + * Default value is `ONE` - Consistency level for reads and writes, respectively. See the [DataStax documentation](http://docs.datastax.com/en/cassandra/2.0/cassandra/dml/dml_config_consistency_c.html) for details. - - *Note that the default setting does not provide durability in the face of node failure. Changing this setting will affect observed performance.* See also `replication_factor`, above. + * *Note that the default setting does not provide durability in the face of node failure. Changing this setting will affect observed performance.* See also `replication_factor`, above. + +* `cassandra.maxconnections` +* `cassandra.coreconnections` + * Defaults for max and core connections can be found here: https://datastax.github.io/java-driver/2.1.8/features/pooling/#pool-size. Cassandra 2.0.X falls under protocol V2, Cassandra 2.1+ falls under protocol V3. +* `cassandra.connecttimeoutmillis` +* `cassandra.readtimeoutmillis` + * Defaults for connect and read timeouts can be found here: https://docs.datastax.com/en/drivers/java/2.0/com/datastax/driver/core/SocketOptions.html. 
\ No newline at end of file diff --git a/cassandra/pom.xml b/cassandra/pom.xml index 04db85d5..c922bbc2 100644 --- a/cassandra/pom.xml +++ b/cassandra/pom.xml @@ -1,51 +1,141 @@ - - + 4.0.0 - com.yahoo.ycsb - binding-parent - 0.11.0-SNAPSHOT - ../binding-parent + com.yahoo.ycsb + binding-parent + 0.12.0-SNAPSHOT + ../binding-parent - + cassandra-binding - Cassandra DB Binding + Cassandra 2.1+ DB Binding jar + + + true + + - - org.apache.cassandra - cassandra-all - ${cassandra.version} - - - - com.datastax.cassandra - cassandra-driver-core - ${cassandra.cql.version} - - - com.yahoo.ycsb - core - ${project.version} - provided - + + + com.datastax.cassandra + cassandra-driver-core + ${cassandra.cql.version} + + + com.yahoo.ycsb + core + ${project.version} + provided + + + org.cassandraunit + cassandra-unit + 3.0.0.1 + shaded + test + + + junit + junit + 4.12 + test + + + + org.hyperic + sigar-dist + 1.6.4.129 + zip + test + + + + + + jdk8-tests + + 1.8 + + + false + + + + + + + central2 + sigar Repository + http://repository.jboss.org/nexus/content/groups/public-jboss/ + default + + false + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + unpack-sigar + process-test-resources + + unpack-dependencies + + + org.hyperic + sigar-dist + **/sigar-bin/lib/* + **/sigar-bin/lib/*jar + + ${project.build.directory}/cassandra-dependency + + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.8 + + -Djava.library.path=${project.build.directory}/cassandra-dependency/hyperic-sigar-1.6.4/sigar-bin/lib + + + + + diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java index eeaca7ae..d4dc8c77 100755 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java +++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java @@ -1,435 +1,480 @@ /** - * Copyright (c) 2013 Yahoo! Inc. All rights reserved. 
+ * Copyright (c) 2013-2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. See accompanying LICENSE file. * * Submitted by Chrisjan Matser on 10/11/2010. */ package com.yahoo.ycsb.db; -import com.datastax.driver.core.*; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.ColumnDefinitions; +import com.datastax.driver.core.ConsistencyLevel; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.querybuilder.QueryBuilder; import com.datastax.driver.core.querybuilder.Select; -import com.yahoo.ycsb.*; -import java.nio.ByteBuffer; +import com.yahoo.ycsb.ByteArrayByteIterator; +import com.yahoo.ycsb.ByteIterator; +import com.yahoo.ycsb.DB; +import com.yahoo.ycsb.DBException; +import com.yahoo.ycsb.Status; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; import java.util.Set; -import java.util.HashMap; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; /** - * Tested with Cassandra 2.0, CQL client for YCSB framework + * Cassandra 2.x CQL client. 
* - * See {@code cassandra2} for a version compatible with Cassandra 2.1+. See - * {@code cassandra2/README.md} for details. + * See {@code cassandra2/README.md} for details. * * @author cmatser */ public class CassandraCQLClient extends DB { private static Cluster cluster = null; private static Session session = null; private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.ONE; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.ONE; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; + public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; + public static final String MAX_CONNECTIONS_PROPERTY = + "cassandra.maxconnections"; + public static final String CORE_CONNECTIONS_PROPERTY = + "cassandra.coreconnections"; + public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = + "cassandra.connecttimeoutmillis"; + public static final String READ_TIMEOUT_MILLIS_PROPERTY = + "cassandra.readtimeoutmillis"; + /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. 
*/ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); - String port = getProperties().getProperty("port", "9042"); - if (port == null) { - throw new DBException(String.format( - "Required property \"%s\" missing for CassandraCQLClient", - PORT_PROPERTY)); - } + String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - // public void connect(String node) {} if ((username != null) && !username.isEmpty()) { cluster = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts).build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } - // Update number of connections based on threads - int threadcount = - Integer.parseInt(getProperties().getProperty("threadcount", "1")); - 
cluster.getConfiguration().getPoolingOptions() - .setMaxConnectionsPerHost(HostDistance.LOCAL, threadcount); + String maxConnections = getProperties().getProperty( + MAX_CONNECTIONS_PROPERTY); + if (maxConnections != null) { + cluster.getConfiguration().getPoolingOptions() + .setMaxConnectionsPerHost(HostDistance.LOCAL, + Integer.valueOf(maxConnections)); + } + + String coreConnections = getProperties().getProperty( + CORE_CONNECTIONS_PROPERTY); + if (coreConnections != null) { + cluster.getConfiguration().getPoolingOptions() + .setCoreConnectionsPerHost(HostDistance.LOCAL, + Integer.valueOf(coreConnections)); + } - // Set connection timeout 3min (default is 5s) - cluster.getConfiguration().getSocketOptions() - .setConnectTimeoutMillis(3 * 60 * 1000); - // Set read (execute) timeout 3min (default is 12s) - cluster.getConfiguration().getSocketOptions() - .setReadTimeoutMillis(3 * 60 * 1000); + String connectTimoutMillis = getProperties().getProperty( + CONNECT_TIMEOUT_MILLIS_PROPERTY); + if (connectTimoutMillis != null) { + cluster.getConfiguration().getSocketOptions() + .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); + } + + String readTimoutMillis = getProperties().getProperty( + READ_TIMEOUT_MILLIS_PROPERTY); + if (readTimoutMillis != null) { + cluster.getConfiguration().getSocketOptions() + .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); + } Metadata metadata = cluster.getMetadata(); - System.out.printf("Connected to cluster: %s\n", + System.err.printf("Connected to cluster: %s\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. 
*/ @Override public void cleanup() throws DBException { - if (INIT_COUNT.decrementAndGet() <= 0) { - cluster.shutdown(); + synchronized (INIT_COUNT) { + final int curInitCount = INIT_COUNT.decrementAndGet(); + if (curInitCount <= 0) { + session.close(); + cluster.close(); + cluster = null; + session = null; + } + if (curInitCount < 0) { + // This should never happen. + throw new DBException( + String.format("initCount is negative: %d", curInitCount)); + } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set fields, HashMap result) { - try { Statement stmt; Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = selectBuilder.from(table).where(QueryBuilder.eq(YCSB_KEY, key)) .limit(1); stmt.setConsistencyLevel(readConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } ResultSet rs = session.execute(stmt); + if (rs.isExhausted()) { + return Status.NOT_FOUND; + } + // Should be only 1 row - if (!rs.isExhausted()) { - Row row = rs.one(); - ColumnDefinitions cd = row.getColumnDefinitions(); + Row row = rs.one(); + ColumnDefinitions cd = row.getColumnDefinitions(); - for (ColumnDefinitions.Definition def : cd) { - ByteBuffer val = row.getBytesUnsafe(def.getName()); - if (val != null) { - result.put(def.getName(), new ByteArrayByteIterator(val.array())); - } else { - result.put(def.getName(), null); - } + for (ColumnDefinitions.Definition def : cd) { + ByteBuffer val = 
row.getBytesUnsafe(def.getName()); + if (val != null) { + result.put(def.getName(), new ByteArrayByteIterator(val.array())); + } else { + result.put(def.getName(), null); } - } return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error reading key: " + key); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. - * + * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { Statement stmt; Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
String initialStmt = stmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token('"); scanStmt.append(startkey); scanStmt.append("')"); scanStmt.append(" LIMIT "); scanStmt.append(recordcount); stmt = new SimpleStatement(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } ResultSet rs = session.execute(stmt); HashMap tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error scanning with startkey: " + startkey); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, HashMap values) { // Insert and updates provide the same functionality return insert(table, key, values); } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, HashMap values) { try { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, key); // Add fields for (Map.Entry entry : values.entrySet()) { Object value; ByteIterator byteIterator = entry.getValue(); value = byteIterator.toString(); insertStmt.value(entry.getKey(), value); } insertStmt.setConsistencyLevel(writeConsistencyLevel); if (debug) { System.out.println(insertStmt.toString()); } session.execute(insertStmt); return Status.OK; } catch (Exception e) { e.printStackTrace(); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { Statement stmt; stmt = QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, key)); stmt.setConsistencyLevel(writeConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } session.execute(stmt); return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error deleting key: " + key); } return Status.ERROR; } } diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java deleted file mode 100644 index b3d8e4a7..00000000 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient10.java +++ /dev/null @@ -1,591 +0,0 @@ -/** - * Copyright (c) 2010 Yahoo! Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. 
You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package com.yahoo.ycsb.db; - -import com.yahoo.ycsb.ByteArrayByteIterator; -import com.yahoo.ycsb.ByteIterator; -import com.yahoo.ycsb.DB; -import com.yahoo.ycsb.DBException; -import com.yahoo.ycsb.Status; -import com.yahoo.ycsb.StringByteIterator; -import com.yahoo.ycsb.Utils; - -import org.apache.cassandra.thrift.AuthenticationRequest; -import org.apache.cassandra.thrift.Cassandra; -import org.apache.cassandra.thrift.Column; -import org.apache.cassandra.thrift.ColumnOrSuperColumn; -import org.apache.cassandra.thrift.ColumnParent; -import org.apache.cassandra.thrift.ColumnPath; -import org.apache.cassandra.thrift.ConsistencyLevel; -import org.apache.cassandra.thrift.KeyRange; -import org.apache.cassandra.thrift.KeySlice; -import org.apache.cassandra.thrift.Mutation; -import org.apache.cassandra.thrift.SlicePredicate; -import org.apache.cassandra.thrift.SliceRange; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.Vector; - -//XXXX if we do replication, fix the consistency levels -/** - * Cassandra 1.0.6 client for YCSB framework. 
- */ -public class CassandraClient10 extends DB { - public static final int OK = 0; - public static final int ERROR = -1; - public static final ByteBuffer EMPTY_BYTE_BUFFER = - ByteBuffer.wrap(new byte[0]); - - public static final String CONNECTION_RETRY_PROPERTY = - "cassandra.connectionretries"; - public static final String CONNECTION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String OPERATION_RETRY_PROPERTY = - "cassandra.operationretries"; - public static final String OPERATION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String USERNAME_PROPERTY = "cassandra.username"; - public static final String PASSWORD_PROPERTY = "cassandra.password"; - - public static final String COLUMN_FAMILY_PROPERTY = "cassandra.columnfamily"; - public static final String COLUMN_FAMILY_PROPERTY_DEFAULT = "data"; - - public static final String READ_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.readconsistencylevel"; - public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - - public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.writeconsistencylevel"; - public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - - public static final String SCAN_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.scanconsistencylevel"; - public static final String SCAN_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - - public static final String DELETE_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.deleteconsistencylevel"; - public static final String DELETE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - - private int connectionRetries; - private int operationRetries; - private String columnFamily; - - private TTransport tr; - private Cassandra.Client client; - - private boolean debug = false; - - private String tableName = ""; - private Exception errorexception = null; - - private List mutations = new ArrayList(); - private Map> mutationMap = - new HashMap>(); - private Map>> record = - new HashMap>>(); - - private ColumnParent parent; - - 
private ConsistencyLevel readConsistencyLevel = ConsistencyLevel.ONE; - private ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.ONE; - private ConsistencyLevel scanConsistencyLevel = ConsistencyLevel.ONE; - private ConsistencyLevel deleteConsistencyLevel = ConsistencyLevel.ONE; - - /** - * Initialize any state for this DB. Called once per DB instance; there is one - * DB instance per client thread. - */ - public void init() throws DBException { - String hosts = getProperties().getProperty("hosts"); - if (hosts == null) { - throw new DBException( - "Required property \"hosts\" missing for CassandraClient"); - } - - columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY, - COLUMN_FAMILY_PROPERTY_DEFAULT); - parent = new ColumnParent(columnFamily); - - connectionRetries = - Integer.parseInt(getProperties().getProperty(CONNECTION_RETRY_PROPERTY, - CONNECTION_RETRY_PROPERTY_DEFAULT)); - operationRetries = - Integer.parseInt(getProperties().getProperty(OPERATION_RETRY_PROPERTY, - OPERATION_RETRY_PROPERTY_DEFAULT)); - - String username = getProperties().getProperty(USERNAME_PROPERTY); - String password = getProperties().getProperty(PASSWORD_PROPERTY); - - readConsistencyLevel = ConsistencyLevel - .valueOf(getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, - READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - writeConsistencyLevel = ConsistencyLevel - .valueOf(getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, - WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - scanConsistencyLevel = ConsistencyLevel - .valueOf(getProperties().getProperty(SCAN_CONSISTENCY_LEVEL_PROPERTY, - SCAN_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - deleteConsistencyLevel = ConsistencyLevel - .valueOf(getProperties().getProperty(DELETE_CONSISTENCY_LEVEL_PROPERTY, - DELETE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - - debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); - - String[] allhosts = hosts.split(","); - String myhost = 
allhosts[Utils.random().nextInt(allhosts.length)]; - - Exception connectexception = null; - - for (int retry = 0; retry < connectionRetries; retry++) { - tr = new TFramedTransport(new TSocket(myhost, - Integer.parseInt(getProperties().getProperty("port", "9160")))); - TProtocol proto = new TBinaryProtocol(tr); - client = new Cassandra.Client(proto); - try { - tr.open(); - connectexception = null; - break; - } catch (Exception e) { - connectexception = e; - } - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - if (connectexception != null) { - System.err.println("Unable to connect to " + myhost + " after " - + connectionRetries + " tries"); - throw new DBException(connectexception); - } - - if (username != null && password != null) { - Map cred = new HashMap(); - cred.put("username", username); - cred.put("password", password); - AuthenticationRequest req = new AuthenticationRequest(cred); - try { - client.login(req); - } catch (Exception e) { - throw new DBException(e); - } - } - } - - /** - * Cleanup any state for this DB. Called once per DB instance; there is one DB - * instance per client thread. - */ - public void cleanup() throws DBException { - tr.close(); - } - - /** - * Read a record from the database. Each field/value pair from the result will - * be stored in a HashMap. - * - * @param table - * The name of the table - * @param key - * The record key of the record to read. 
- * @param fields - * The list of fields to read, or null for all of them - * @param result - * A HashMap of field/value pairs for the result - * @return Zero on success, a non-zero error code on error - */ - public Status read(String table, String key, Set fields, - HashMap result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - predicate = new SlicePredicate().setSlice_range(new SliceRange( - EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, 1000000)); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - List results = - client.get_slice(ByteBuffer.wrap(key.getBytes("UTF-8")), parent, - predicate, readConsistencyLevel); - - if (debug) { - System.out.print("Reading key: " + key); - } - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn oneresult : results) { - - column = oneresult.column; - name = new String(column.name.array(), - column.name.position() + column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - column.value.remaining()); - - result.put(name, value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - if (debug) { - System.out.println(); - System.out - .println("ConsistencyLevel=" + readConsistencyLevel.toString()); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - 
errorexception.printStackTrace(System.out); - return Status.ERROR; - - } - - /** - * Perform a range scan for a set of records in the database. Each field/value - * pair from the result will be stored in a HashMap. - * - * @param table - * The name of the table - * @param startkey - * The record key of the first record to read. - * @param recordcount - * The number of records to read - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A Vector of HashMaps, where each HashMap is a set field/value - * pairs for one record - * @return Zero on success, a non-zero error code on error - */ - public Status scan(String table, String startkey, int recordcount, - Set fields, Vector> result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - predicate = new SlicePredicate().setSlice_range(new SliceRange( - EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, 1000000)); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - KeyRange kr = new KeyRange().setStart_key(startkey.getBytes("UTF-8")) - .setEnd_key(new byte[] {}).setCount(recordcount); - - List results = client.get_range_slices(parent, predicate, kr, - scanConsistencyLevel); - - if (debug) { - System.out.println("Scanning startkey: " + startkey); - } - - HashMap tuple; - for (KeySlice oneresult : results) { - tuple = new HashMap(); - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn onecol : oneresult.columns) { - column = onecol.column; - name = new String(column.name.array(), - column.name.position() + 
column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - column.value.remaining()); - - tuple.put(name, value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - result.add(tuple); - if (debug) { - System.out.println(); - System.out - .println("ConsistencyLevel=" + scanConsistencyLevel.toString()); - } - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Update a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key, overwriting any existing values with the same field name. - * - * @param table - * The name of the table - * @param key - * The record key of the record to write. - * @param values - * A HashMap of field/value pairs to update in the record - * @return Zero on success, a non-zero error code on error - */ - public Status update(String table, String key, - HashMap values) { - return insert(table, key, values); - } - - /** - * Insert a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key. - * - * @param table - * The name of the table - * @param key - * The record key of the record to insert. 
- * @param values - * A HashMap of field/value pairs to insert in the record - * @return Zero on success, a non-zero error code on error - */ - public Status insert(String table, String key, - HashMap values) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - mutations.clear(); - mutationMap.clear(); - record.clear(); - - if (debug) { - System.out.println("Inserting key: " + key); - } - - try { - ByteBuffer wrappedKey = ByteBuffer.wrap(key.getBytes("UTF-8")); - - Column col; - ColumnOrSuperColumn column; - for (Map.Entry entry : values.entrySet()) { - col = new Column(); - col.setName(ByteBuffer.wrap(entry.getKey().getBytes("UTF-8"))); - col.setValue(ByteBuffer.wrap(entry.getValue().toArray())); - col.setTimestamp(System.currentTimeMillis()); - - column = new ColumnOrSuperColumn(); - column.setColumn(col); - - mutations.add(new Mutation().setColumn_or_supercolumn(column)); - } - - mutationMap.put(columnFamily, mutations); - record.put(wrappedKey, mutationMap); - - client.batch_mutate(record, writeConsistencyLevel); - - if (debug) { - System.out - .println("ConsistencyLevel=" + writeConsistencyLevel.toString()); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Delete a record from the database. - * - * @param table - * The name of the table - * @param key - * The record key of the record to delete. 
- * @return Zero on success, a non-zero error code on error - */ - public Status delete(String table, String key) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - try { - client.remove(ByteBuffer.wrap(key.getBytes("UTF-8")), - new ColumnPath(columnFamily), System.currentTimeMillis(), - deleteConsistencyLevel); - - if (debug) { - System.out.println("Delete key: " + key); - System.out - .println("ConsistencyLevel=" + deleteConsistencyLevel.toString()); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - public static void main(String[] args) { - CassandraClient10 cli = new CassandraClient10(); - - Properties props = new Properties(); - - props.setProperty("hosts", args[0]); - cli.setProperties(props); - - try { - cli.init(); - } catch (Exception e) { - e.printStackTrace(); - System.exit(0); - } - - HashMap vals = new HashMap(); - vals.put("age", new StringByteIterator("57")); - vals.put("middlename", new StringByteIterator("bradley")); - vals.put("favoritecolor", new StringByteIterator("blue")); - Status res = cli.insert("usertable", "BrianFrankCooper", vals); - System.out.println("Result of insert: " + res); - - HashMap result = new HashMap(); - HashSet fields = new HashSet(); - fields.add("middlename"); - fields.add("age"); - fields.add("favoritecolor"); - res = cli.read("usertable", "BrianFrankCooper", null, result); - System.out.println("Result of read: " + res); - for (Map.Entry entry : result.entrySet()) { - System.out.println("[" + entry.getKey() + "]=[" + entry.getValue() + "]"); - } - - res = 
cli.delete("usertable", "BrianFrankCooper"); - System.out.println("Result of delete: " + res); - } -} diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java deleted file mode 100644 index f2075e7f..00000000 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient7.java +++ /dev/null @@ -1,549 +0,0 @@ -/** - * Copyright (c) 2010 Yahoo! Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. 
- */ - -package com.yahoo.ycsb.db; - -import com.yahoo.ycsb.ByteArrayByteIterator; -import com.yahoo.ycsb.ByteIterator; -import com.yahoo.ycsb.DB; -import com.yahoo.ycsb.DBException; -import com.yahoo.ycsb.Status; -import com.yahoo.ycsb.StringByteIterator; -import com.yahoo.ycsb.Utils; - -import org.apache.cassandra.thrift.AuthenticationRequest; -import org.apache.cassandra.thrift.Cassandra; -import org.apache.cassandra.thrift.Column; -import org.apache.cassandra.thrift.ColumnOrSuperColumn; -import org.apache.cassandra.thrift.ColumnParent; -import org.apache.cassandra.thrift.ColumnPath; -import org.apache.cassandra.thrift.ConsistencyLevel; -import org.apache.cassandra.thrift.KeyRange; -import org.apache.cassandra.thrift.KeySlice; -import org.apache.cassandra.thrift.Mutation; -import org.apache.cassandra.thrift.SlicePredicate; -import org.apache.cassandra.thrift.SliceRange; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.Vector; - -//XXXX if we do replication, fix the consistency levels -/** - * Cassandra 0.7 client for YCSB framework. 
- */ -public class CassandraClient7 extends DB { - public static final ByteBuffer EMPTY_BYTE_BUFFER = - ByteBuffer.wrap(new byte[0]); - - public static final String CONNECTION_RETRY_PROPERTY = - "cassandra.connectionretries"; - public static final String CONNECTION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String OPERATION_RETRY_PROPERTY = - "cassandra.operationretries"; - public static final String OPERATION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String USERNAME_PROPERTY = "cassandra.username"; - public static final String PASSWORD_PROPERTY = "cassandra.password"; - - public static final String COLUMN_FAMILY_PROPERTY = "cassandra.columnfamily"; - public static final String COLUMN_FAMILY_PROPERTY_DEFAULT = "data"; - - private int connectionRetries; - private int operationRetries; - private String columnFamily; - - private TTransport tr; - private Cassandra.Client client; - - private boolean debug = false; - - private String tableName = ""; - private Exception errorexception = null; - - private List mutations = new ArrayList(); - private Map> mutationMap = - new HashMap>(); - private Map>> record = - new HashMap>>(); - - private ColumnParent parent; - - /** - * Initialize any state for this DB. Called once per DB instance; there is one - * DB instance per client thread. 
- */ - public void init() throws DBException { - String hosts = getProperties().getProperty("hosts"); - if (hosts == null) { - throw new DBException( - "Required property \"hosts\" missing for CassandraClient"); - } - - columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY, - COLUMN_FAMILY_PROPERTY_DEFAULT); - parent = new ColumnParent(columnFamily); - - connectionRetries = - Integer.parseInt(getProperties().getProperty(CONNECTION_RETRY_PROPERTY, - CONNECTION_RETRY_PROPERTY_DEFAULT)); - operationRetries = - Integer.parseInt(getProperties().getProperty(OPERATION_RETRY_PROPERTY, - OPERATION_RETRY_PROPERTY_DEFAULT)); - - String username = getProperties().getProperty(USERNAME_PROPERTY); - String password = getProperties().getProperty(PASSWORD_PROPERTY); - - debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); - - String[] allhosts = hosts.split(","); - String myhost = allhosts[Utils.random().nextInt(allhosts.length)]; - - Exception connectexception = null; - - for (int retry = 0; retry < connectionRetries; retry++) { - tr = new TFramedTransport(new TSocket(myhost, 9160)); - TProtocol proto = new TBinaryProtocol(tr); - client = new Cassandra.Client(proto); - try { - tr.open(); - connectexception = null; - break; - } catch (Exception e) { - connectexception = e; - } - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - if (connectexception != null) { - System.err.println("Unable to connect to " + myhost + " after " - + connectionRetries + " tries"); - System.out.println("Unable to connect to " + myhost + " after " - + connectionRetries + " tries"); - throw new DBException(connectexception); - } - - if (username != null && password != null) { - Map cred = new HashMap(); - cred.put("username", username); - cred.put("password", password); - AuthenticationRequest req = new AuthenticationRequest(cred); - try { - client.login(req); - } catch (Exception e) { - throw new 
DBException(e); - } - } - } - - /** - * Cleanup any state for this DB. Called once per DB instance; there is one DB - * instance per client thread. - */ - public void cleanup() throws DBException { - tr.close(); - } - - /** - * Read a record from the database. Each field/value pair from the result will - * be stored in a HashMap. - * - * @param table - * The name of the table - * @param key - * The record key of the record to read. - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A HashMap of field/value pairs for the result - * @return Zero on success, a non-zero error code on error - */ - public Status read(String table, String key, Set fields, - HashMap result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - SliceRange range = new SliceRange(EMPTY_BYTE_BUFFER, - EMPTY_BYTE_BUFFER, false, 1000000); - - predicate = new SlicePredicate().setSlice_range(range); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - List results = - client.get_slice(ByteBuffer.wrap(key.getBytes("UTF-8")), parent, - predicate, ConsistencyLevel.ONE); - - if (debug) { - System.out.print("Reading key: " + key); - } - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn oneresult : results) { - - column = oneresult.column; - name = new String(column.name.array(), - column.name.position() + column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - 
column.value.remaining()); - - result.put(name, value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - if (debug) { - System.out.println(); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - - } - - /** - * Perform a range scan for a set of records in the database. Each field/value - * pair from the result will be stored in a HashMap. - * - * @param table - * The name of the table - * @param startkey - * The record key of the first record to read. - * @param recordcount - * The number of records to read - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A Vector of HashMaps, where each HashMap is a set field/value - * pairs for one record - * @return Zero on success, a non-zero error code on error - */ - public Status scan(String table, String startkey, int recordcount, - Set fields, Vector> result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - SliceRange range = new SliceRange(EMPTY_BYTE_BUFFER, - EMPTY_BYTE_BUFFER, false, 1000000); - - predicate = new SlicePredicate().setSlice_range(range); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - KeyRange kr = new KeyRange().setStart_key(startkey.getBytes("UTF-8")) - .setEnd_key(new byte[] {}).setCount(recordcount); - - List results = 
client.get_range_slices(parent, predicate, kr, - ConsistencyLevel.ONE); - - if (debug) { - System.out.println("Scanning startkey: " + startkey); - } - - HashMap tuple; - for (KeySlice oneresult : results) { - tuple = new HashMap(); - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn onecol : oneresult.columns) { - column = onecol.column; - name = new String(column.name.array(), - column.name.position() + column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - column.value.remaining()); - - tuple.put(name, value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - result.add(tuple); - if (debug) { - System.out.println(); - } - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Update a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key, overwriting any existing values with the same field name. - * - * @param table - * The name of the table - * @param key - * The record key of the record to write. - * @param values - * A HashMap of field/value pairs to update in the record - * @return Zero on success, a non-zero error code on error - */ - public Status update(String table, String key, - HashMap values) { - return insert(table, key, values); - } - - /** - * Insert a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key. - * - * @param table - * The name of the table - * @param key - * The record key of the record to insert. 
- * @param values - * A HashMap of field/value pairs to insert in the record - * @return Zero on success, a non-zero error code on error - */ - public Status insert(String table, String key, - HashMap values) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - mutations.clear(); - mutationMap.clear(); - record.clear(); - - if (debug) { - System.out.println("Inserting key: " + key); - } - - try { - ByteBuffer wrappedKey = ByteBuffer.wrap(key.getBytes("UTF-8")); - - Column col; - ColumnOrSuperColumn column; - for (Map.Entry entry : values.entrySet()) { - col = new Column(); - col.setName(ByteBuffer.wrap(entry.getKey().getBytes("UTF-8"))); - col.setValue(ByteBuffer.wrap(entry.getValue().toArray())); - col.setTimestamp(System.currentTimeMillis()); - - column = new ColumnOrSuperColumn(); - column.setColumn(col); - - mutations.add(new Mutation().setColumn_or_supercolumn(column)); - } - - mutationMap.put(columnFamily, mutations); - record.put(wrappedKey, mutationMap); - - client.batch_mutate(record, ConsistencyLevel.ONE); - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Delete a record from the database. - * - * @param table - * The name of the table - * @param key - * The record key of the record to delete. 
- * @return Zero on success, a non-zero error code on error - */ - public Status delete(String table, String key) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - try { - client.remove(ByteBuffer.wrap(key.getBytes("UTF-8")), - new ColumnPath(columnFamily), System.currentTimeMillis(), - ConsistencyLevel.ONE); - - if (debug) { - System.out.println("Delete key: " + key); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - public static void main(String[] args) { - CassandraClient7 cli = new CassandraClient7(); - - Properties props = new Properties(); - - props.setProperty("hosts", args[0]); - cli.setProperties(props); - - try { - cli.init(); - } catch (Exception e) { - e.printStackTrace(); - System.exit(0); - } - - HashMap vals = new HashMap(); - vals.put("age", new StringByteIterator("57")); - vals.put("middlename", new StringByteIterator("bradley")); - vals.put("favoritecolor", new StringByteIterator("blue")); - Status res = cli.insert("usertable", "BrianFrankCooper", vals); - System.out.println("Result of insert: " + res.getName()); - - HashMap result = new HashMap(); - HashSet fields = new HashSet(); - fields.add("middlename"); - fields.add("age"); - fields.add("favoritecolor"); - res = cli.read("usertable", "BrianFrankCooper", null, result); - System.out.println("Result of read: " + res.getName()); - for (Map.Entry entry : result.entrySet()) { - System.out.println("[" + entry.getKey() + "]=[" + entry.getValue() + "]"); - } - - res = cli.delete("usertable", "BrianFrankCooper"); - System.out.println("Result of 
delete: " + res.getName()); - } -} diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java deleted file mode 100644 index ca72c339..00000000 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraClient8.java +++ /dev/null @@ -1,528 +0,0 @@ -/** - * Copyright (c) 2010 Yahoo! Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -package com.yahoo.ycsb.db; - -import com.yahoo.ycsb.*; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Vector; -import java.util.Properties; -import java.nio.ByteBuffer; - -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.cassandra.thrift.*; - -//XXXX if we do replication, fix the consistency levels -/** - * Cassandra 0.8 client for YCSB framework. 
- */ -public class CassandraClient8 extends DB { - public static final ByteBuffer EMPTY_BYTE_BUFFER = - ByteBuffer.wrap(new byte[0]); - - public static final String CONNECTION_RETRY_PROPERTY = - "cassandra.connectionretries"; - public static final String CONNECTION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String OPERATION_RETRY_PROPERTY = - "cassandra.operationretries"; - public static final String OPERATION_RETRY_PROPERTY_DEFAULT = "300"; - - public static final String USERNAME_PROPERTY = "cassandra.username"; - public static final String PASSWORD_PROPERTY = "cassandra.password"; - - public static final String COLUMN_FAMILY_PROPERTY = "cassandra.columnfamily"; - public static final String COLUMN_FAMILY_PROPERTY_DEFAULT = "data"; - - private int connectionRetries; - private int operationRetries; - private String columnFamily; - - private TTransport tr; - private Cassandra.Client client; - - private boolean debug = false; - - private String tableName = ""; - private Exception errorexception = null; - - private List mutations = new ArrayList(); - private Map> mutationMap = - new HashMap>(); - private Map>> record = - new HashMap>>(); - - private ColumnParent parent; - - /** - * Initialize any state for this DB. Called once per DB instance; there is one - * DB instance per client thread. 
- */ - public void init() throws DBException { - String hosts = getProperties().getProperty("hosts"); - if (hosts == null) { - throw new DBException( - "Required property \"hosts\" missing for CassandraClient"); - } - - columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY, - COLUMN_FAMILY_PROPERTY_DEFAULT); - parent = new ColumnParent(columnFamily); - - connectionRetries = - Integer.parseInt(getProperties().getProperty(CONNECTION_RETRY_PROPERTY, - CONNECTION_RETRY_PROPERTY_DEFAULT)); - operationRetries = - Integer.parseInt(getProperties().getProperty(OPERATION_RETRY_PROPERTY, - OPERATION_RETRY_PROPERTY_DEFAULT)); - - String username = getProperties().getProperty(USERNAME_PROPERTY); - String password = getProperties().getProperty(PASSWORD_PROPERTY); - - debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); - - String[] allhosts = hosts.split(","); - String myhost = allhosts[Utils.random().nextInt(allhosts.length)]; - - Exception connectexception = null; - - for (int retry = 0; retry < connectionRetries; retry++) { - tr = new TFramedTransport(new TSocket(myhost, 9160)); - TProtocol proto = new TBinaryProtocol(tr); - client = new Cassandra.Client(proto); - try { - tr.open(); - connectexception = null; - break; - } catch (Exception e) { - connectexception = e; - } - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - if (connectexception != null) { - System.err.println("Unable to connect to " + myhost + " after " - + connectionRetries + " tries"); - System.out.println("Unable to connect to " + myhost + " after " - + connectionRetries + " tries"); - throw new DBException(connectexception); - } - - if (username != null && password != null) { - Map cred = new HashMap(); - cred.put("username", username); - cred.put("password", password); - AuthenticationRequest req = new AuthenticationRequest(cred); - try { - client.login(req); - } catch (Exception e) { - throw new 
DBException(e); - } - } - } - - /** - * Cleanup any state for this DB. Called once per DB instance; there is one DB - * instance per client thread. - */ - public void cleanup() throws DBException { - tr.close(); - } - - /** - * Read a record from the database. Each field/value pair from the result will - * be stored in a HashMap. - * - * @param table - * The name of the table - * @param key - * The record key of the record to read. - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A HashMap of field/value pairs for the result - * @return Zero on success, a non-zero error code on error - */ - public Status read(String table, String key, Set fields, - HashMap result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - predicate = new SlicePredicate().setSlice_range(new SliceRange( - EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, 1000000)); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - List results = - client.get_slice(ByteBuffer.wrap(key.getBytes("UTF-8")), parent, - predicate, ConsistencyLevel.ONE); - - if (debug) { - System.out.print("Reading key: " + key); - } - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn oneresult : results) { - - column = oneresult.column; - name = new String(column.name.array(), - column.name.position() + column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - column.value.remaining()); - - result.put(name, 
value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - if (debug) { - System.out.println(); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - - } - - /** - * Perform a range scan for a set of records in the database. Each field/value - * pair from the result will be stored in a HashMap. - * - * @param table - * The name of the table - * @param startkey - * The record key of the first record to read. - * @param recordcount - * The number of records to read - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A Vector of HashMaps, where each HashMap is a set field/value - * pairs for one record - * @return Zero on success, a non-zero error code on error - */ - public Status scan(String table, String startkey, int recordcount, - Set fields, Vector> result) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - - try { - SlicePredicate predicate; - if (fields == null) { - predicate = new SlicePredicate().setSlice_range(new SliceRange( - EMPTY_BYTE_BUFFER, EMPTY_BYTE_BUFFER, false, 1000000)); - - } else { - ArrayList fieldlist = - new ArrayList(fields.size()); - for (String s : fields) { - fieldlist.add(ByteBuffer.wrap(s.getBytes("UTF-8"))); - } - - predicate = new SlicePredicate().setColumn_names(fieldlist); - } - - KeyRange kr = new KeyRange().setStart_key(startkey.getBytes("UTF-8")) - .setEnd_key(new byte[] {}).setCount(recordcount); - - List results = client.get_range_slices(parent, predicate, kr, - ConsistencyLevel.ONE); - - if (debug) { - 
System.out.println("Scanning startkey: " + startkey); - } - - HashMap tuple; - for (KeySlice oneresult : results) { - tuple = new HashMap(); - - Column column; - String name; - ByteIterator value; - for (ColumnOrSuperColumn onecol : oneresult.columns) { - column = onecol.column; - name = new String(column.name.array(), - column.name.position() + column.name.arrayOffset(), - column.name.remaining()); - value = new ByteArrayByteIterator(column.value.array(), - column.value.position() + column.value.arrayOffset(), - column.value.remaining()); - - tuple.put(name, value); - - if (debug) { - System.out.print("(" + name + "=" + value + ")"); - } - } - - result.add(tuple); - if (debug) { - System.out.println(); - } - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Update a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key, overwriting any existing values with the same field name. - * - * @param table - * The name of the table - * @param key - * The record key of the record to write. - * @param values - * A HashMap of field/value pairs to update in the record - * @return Zero on success, a non-zero error code on error - */ - public Status update(String table, String key, - HashMap values) { - return insert(table, key, values); - } - - /** - * Insert a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key. - * - * @param table - * The name of the table - * @param key - * The record key of the record to insert. 
- * @param values - * A HashMap of field/value pairs to insert in the record - * @return Zero on success, a non-zero error code on error - */ - public Status insert(String table, String key, - HashMap values) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - mutations.clear(); - mutationMap.clear(); - record.clear(); - - if (debug) { - System.out.println("Inserting key: " + key); - } - - try { - ByteBuffer wrappedKey = ByteBuffer.wrap(key.getBytes("UTF-8")); - - Column col; - ColumnOrSuperColumn column; - for (Map.Entry entry : values.entrySet()) { - col = new Column(); - col.setName(ByteBuffer.wrap(entry.getKey().getBytes("UTF-8"))); - col.setValue(ByteBuffer.wrap(entry.getValue().toArray())); - col.setTimestamp(System.currentTimeMillis()); - - column = new ColumnOrSuperColumn(); - column.setColumn(col); - - mutations.add(new Mutation().setColumn_or_supercolumn(column)); - } - - mutationMap.put(columnFamily, mutations); - record.put(wrappedKey, mutationMap); - - client.batch_mutate(record, ConsistencyLevel.ONE); - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - /** - * Delete a record from the database. - * - * @param table - * The name of the table - * @param key - * The record key of the record to delete. 
- * @return Zero on success, a non-zero error code on error - */ - public Status delete(String table, String key) { - if (!tableName.equals(table)) { - try { - client.set_keyspace(table); - tableName = table; - } catch (Exception e) { - e.printStackTrace(); - e.printStackTrace(System.out); - return Status.ERROR; - } - } - - for (int i = 0; i < operationRetries; i++) { - try { - client.remove(ByteBuffer.wrap(key.getBytes("UTF-8")), - new ColumnPath(columnFamily), System.currentTimeMillis(), - ConsistencyLevel.ONE); - - if (debug) { - System.out.println("Delete key: " + key); - } - - return Status.OK; - } catch (Exception e) { - errorexception = e; - } - try { - Thread.sleep(500); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - errorexception.printStackTrace(); - errorexception.printStackTrace(System.out); - return Status.ERROR; - } - - public static void main(String[] args) { - CassandraClient8 cli = new CassandraClient8(); - - Properties props = new Properties(); - - props.setProperty("hosts", args[0]); - cli.setProperties(props); - - try { - cli.init(); - } catch (Exception e) { - e.printStackTrace(); - System.exit(0); - } - - HashMap vals = new HashMap(); - vals.put("age", new StringByteIterator("57")); - vals.put("middlename", new StringByteIterator("bradley")); - vals.put("favoritecolor", new StringByteIterator("blue")); - Status res = cli.insert("usertable", "BrianFrankCooper", vals); - System.out.println("Result of insert: " + res); - - HashMap result = new HashMap(); - HashSet fields = new HashSet(); - fields.add("middlename"); - fields.add("age"); - fields.add("favoritecolor"); - res = cli.read("usertable", "BrianFrankCooper", null, result); - System.out.println("Result of read: " + res); - for (Map.Entry entry : result.entrySet()) { - System.out.println("[" + entry.getKey() + "]=[" + entry.getValue() + "]"); - } - - res = cli.delete("usertable", "BrianFrankCooper"); - System.out.println("Result of delete: " + res); - } 
-} diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/package-info.java b/cassandra/src/main/java/com/yahoo/ycsb/db/package-info.java index 88ce1f01..007f01dc 100644 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/package-info.java +++ b/cassandra/src/main/java/com/yahoo/ycsb/db/package-info.java @@ -1,23 +1,23 @@ /* * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * The YCSB binding for Cassandra - * versions 0.7, 0.8, and 1.0.X. + * 2.1+ via CQL. */ package com.yahoo.ycsb.db; diff --git a/cassandra2/README.md b/cassandra2/README.md deleted file mode 100644 index bee44c8d..00000000 --- a/cassandra2/README.md +++ /dev/null @@ -1,80 +0,0 @@ - - -# Apache Cassandra 2.x CQL binding - -Binding for [Apache Cassandra](http://cassandra.apache.org), using the CQL API -via the [DataStax -driver](http://docs.datastax.com/en/developer/java-driver/2.1/java-driver/whatsNew2.html). - -To run against the (deprecated) Cassandra Thrift API, use the `cassandra-10` binding. 
- -## Creating a table for use with YCSB - -For keyspace `ycsb`, table `usertable`: - - cqlsh> create keyspace ycsb - WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor': 3 }; - cqlsh> USE ycsb; - cqlsh> create table usertable ( - y_id varchar primary key, - field0 varchar, - field1 varchar, - field2 varchar, - field3 varchar, - field4 varchar, - field5 varchar, - field6 varchar, - field7 varchar, - field8 varchar, - field9 varchar); - -**Note that `replication_factor` and consistency levels (below) will affect performance.** - -## Cassandra Configuration Parameters - -- `hosts` (**required**) - - Cassandra nodes to connect to. - - No default. - -* `port` - * CQL port for communicating with Cassandra cluster. - * Default is `9042`. - -- `cassandra.keyspace` - Keyspace name - must match the keyspace for the table created (see above). - See http://docs.datastax.com/en/cql/3.1/cql/cql_reference/create_keyspace_r.html for details. - - - Default value is `ycsb` - -- `cassandra.username` -- `cassandra.password` - - Optional user name and password for authentication. See http://docs.datastax.com/en/cassandra/2.0/cassandra/security/security_config_native_authenticate_t.html for details. - -* `cassandra.readconsistencylevel` -* `cassandra.writeconsistencylevel` - - * Default value is `ONE` - - Consistency level for reads and writes, respectively. See the [DataStax documentation](http://docs.datastax.com/en/cassandra/2.0/cassandra/dml/dml_config_consistency_c.html) for details. - * *Note that the default setting does not provide durability in the face of node failure. Changing this setting will affect observed performance.* See also `replication_factor`, above. - -* `cassandra.maxconnections` -* `cassandra.coreconnections` - * Defaults for max and core connections can be found here: https://datastax.github.io/java-driver/2.1.8/features/pooling/#pool-size. Cassandra 2.0.X falls under protocol V2, Cassandra 2.1+ falls under protocol V3. 
-* `cassandra.connecttimeoutmillis` -* `cassandra.readtimeoutmillis` - * Defaults for connect and read timeouts can be found here: https://docs.datastax.com/en/drivers/java/2.0/com/datastax/driver/core/SocketOptions.html. \ No newline at end of file diff --git a/cassandra2/pom.xml b/cassandra2/pom.xml deleted file mode 100644 index 45916e79..00000000 --- a/cassandra2/pom.xml +++ /dev/null @@ -1,145 +0,0 @@ - - - - - - 4.0.0 - - com.yahoo.ycsb - binding-parent - 0.11.0-SNAPSHOT - ../binding-parent - - - cassandra2-binding - Cassandra 2.1+ DB Binding - jar - - - - true - - - - - - com.datastax.cassandra - cassandra-driver-core - ${cassandra2.cql.version} - - - com.yahoo.ycsb - core - ${project.version} - provided - - - org.cassandraunit - cassandra-unit - 3.0.0.1 - shaded - test - - - junit - junit - 4.12 - test - - - - org.hyperic - sigar-dist - 1.6.4.129 - zip - test - - - - - - - jdk8-tests - - 1.8 - - - false - - - - - - - central2 - sigar Repository - http://repository.jboss.org/nexus/content/groups/public-jboss/ - default - - false - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - unpack-sigar - process-test-resources - - unpack-dependencies - - - org.hyperic - sigar-dist - **/sigar-bin/lib/* - **/sigar-bin/lib/*jar - - ${project.build.directory}/cassandra-dependency - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.8 - - - - java.library.path - ${project.build.directory}/cassandra-dependency/hyperic-sigar-1.6.4/sigar-bin/lib - - - - - - - diff --git a/cassandra2/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java b/cassandra2/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java deleted file mode 100644 index 91faf029..00000000 --- a/cassandra2/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java +++ /dev/null @@ -1,492 +0,0 @@ -/** - * Copyright (c) 2013-2015 YCSB contributors. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. See accompanying LICENSE file. - * - * Submitted by Chrisjan Matser on 10/11/2010. - */ -package com.yahoo.ycsb.db; - -import com.datastax.driver.core.Cluster; -import com.datastax.driver.core.ColumnDefinitions; -import com.datastax.driver.core.ConsistencyLevel; -import com.datastax.driver.core.Host; -import com.datastax.driver.core.HostDistance; -import com.datastax.driver.core.Metadata; -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.SimpleStatement; -import com.datastax.driver.core.Statement; -import com.datastax.driver.core.querybuilder.Insert; -import com.datastax.driver.core.querybuilder.QueryBuilder; -import com.datastax.driver.core.querybuilder.Select; -import com.yahoo.ycsb.ByteArrayByteIterator; -import com.yahoo.ycsb.ByteIterator; -import com.yahoo.ycsb.DB; -import com.yahoo.ycsb.DBException; -import com.yahoo.ycsb.Status; - -import java.nio.ByteBuffer; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.Vector; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Cassandra 2.x CQL client. - * - * See {@code cassandra2/README.md} for details. 
- * - * @author cmatser - */ -public class CassandraCQLClient extends DB { - - private static Cluster cluster = null; - private static Session session = null; - - private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.ONE; - private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.ONE; - - public static final String YCSB_KEY = "y_id"; - public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; - public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; - public static final String USERNAME_PROPERTY = "cassandra.username"; - public static final String PASSWORD_PROPERTY = "cassandra.password"; - public static final String TRACING_PROPERTY = "cassandra.tracing"; - public static final String TRACING_PROPERTY_DEFAULT = "false"; - - public static final String HOSTS_PROPERTY = "hosts"; - public static final String PORT_PROPERTY = "port"; - public static final String PORT_PROPERTY_DEFAULT = "9042"; - - public static final String READ_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.readconsistencylevel"; - public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = - "cassandra.writeconsistencylevel"; - public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; - - public static final String MAX_CONNECTIONS_PROPERTY = - "cassandra.maxconnections"; - public static final String CORE_CONNECTIONS_PROPERTY = - "cassandra.coreconnections"; - public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = - "cassandra.connecttimeoutmillis"; - public static final String READ_TIMEOUT_MILLIS_PROPERTY = - "cassandra.readtimeoutmillis"; - - /** - * Count the number of times initialized to teardown on the last - * {@link #cleanup()}. - */ - private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); - - private static boolean debug = false; - - private static boolean trace = false; - - /** - * Initialize any state for this DB. 
Called once per DB instance; there is one - * DB instance per client thread. - */ - @Override - public void init() throws DBException { - - // Keep track of number of calls to init (for later cleanup) - INIT_COUNT.incrementAndGet(); - - // Synchronized so that we only have a single - // cluster/session instance for all the threads. - synchronized (INIT_COUNT) { - - // Check if the cluster has already been initialized - if (cluster != null) { - return; - } - - try { - - debug = - Boolean.parseBoolean(getProperties().getProperty("debug", "false")); - trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); - String host = getProperties().getProperty(HOSTS_PROPERTY); - if (host == null) { - throw new DBException(String.format( - "Required property \"%s\" missing for CassandraCQLClient", - HOSTS_PROPERTY)); - } - String[] hosts = host.split(","); - String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); - - String username = getProperties().getProperty(USERNAME_PROPERTY); - String password = getProperties().getProperty(PASSWORD_PROPERTY); - - String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, - KEYSPACE_PROPERTY_DEFAULT); - - readConsistencyLevel = ConsistencyLevel.valueOf( - getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, - READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - writeConsistencyLevel = ConsistencyLevel.valueOf( - getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, - WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); - - if ((username != null) && !username.isEmpty()) { - cluster = Cluster.builder().withCredentials(username, password) - .withPort(Integer.valueOf(port)).addContactPoints(hosts).build(); - } else { - cluster = Cluster.builder().withPort(Integer.valueOf(port)) - .addContactPoints(hosts).build(); - } - - String maxConnections = getProperties().getProperty( - MAX_CONNECTIONS_PROPERTY); - if (maxConnections != null) { - 
cluster.getConfiguration().getPoolingOptions() - .setMaxConnectionsPerHost(HostDistance.LOCAL, - Integer.valueOf(maxConnections)); - } - - String coreConnections = getProperties().getProperty( - CORE_CONNECTIONS_PROPERTY); - if (coreConnections != null) { - cluster.getConfiguration().getPoolingOptions() - .setCoreConnectionsPerHost(HostDistance.LOCAL, - Integer.valueOf(coreConnections)); - } - - String connectTimoutMillis = getProperties().getProperty( - CONNECT_TIMEOUT_MILLIS_PROPERTY); - if (connectTimoutMillis != null) { - cluster.getConfiguration().getSocketOptions() - .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); - } - - String readTimoutMillis = getProperties().getProperty( - READ_TIMEOUT_MILLIS_PROPERTY); - if (readTimoutMillis != null) { - cluster.getConfiguration().getSocketOptions() - .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); - } - - Metadata metadata = cluster.getMetadata(); - System.err.printf("Connected to cluster: %s\n", - metadata.getClusterName()); - - for (Host discoveredHost : metadata.getAllHosts()) { - System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n", - discoveredHost.getDatacenter(), discoveredHost.getAddress(), - discoveredHost.getRack()); - } - - session = cluster.connect(keyspace); - - } catch (Exception e) { - throw new DBException(e); - } - } // synchronized - } - - /** - * Cleanup any state for this DB. Called once per DB instance; there is one DB - * instance per client thread. - */ - @Override - public void cleanup() throws DBException { - synchronized (INIT_COUNT) { - final int curInitCount = INIT_COUNT.decrementAndGet(); - if (curInitCount <= 0) { - session.close(); - cluster.close(); - cluster = null; - session = null; - } - if (curInitCount < 0) { - // This should never happen. - throw new DBException( - String.format("initCount is negative: %d", curInitCount)); - } - } - } - - /** - * Read a record from the database. Each field/value pair from the result will - * be stored in a HashMap. 
- * - * @param table - * The name of the table - * @param key - * The record key of the record to read. - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A HashMap of field/value pairs for the result - * @return Zero on success, a non-zero error code on error - */ - @Override - public Status read(String table, String key, Set fields, - HashMap result) { - try { - Statement stmt; - Select.Builder selectBuilder; - - if (fields == null) { - selectBuilder = QueryBuilder.select().all(); - } else { - selectBuilder = QueryBuilder.select(); - for (String col : fields) { - ((Select.Selection) selectBuilder).column(col); - } - } - - stmt = selectBuilder.from(table).where(QueryBuilder.eq(YCSB_KEY, key)) - .limit(1); - stmt.setConsistencyLevel(readConsistencyLevel); - - if (debug) { - System.out.println(stmt.toString()); - } - if (trace) { - stmt.enableTracing(); - } - ResultSet rs = session.execute(stmt); - - if (rs.isExhausted()) { - return Status.NOT_FOUND; - } - - // Should be only 1 row - Row row = rs.one(); - ColumnDefinitions cd = row.getColumnDefinitions(); - - for (ColumnDefinitions.Definition def : cd) { - ByteBuffer val = row.getBytesUnsafe(def.getName()); - if (val != null) { - result.put(def.getName(), new ByteArrayByteIterator(val.array())); - } else { - result.put(def.getName(), null); - } - } - - return Status.OK; - - } catch (Exception e) { - e.printStackTrace(); - System.out.println("Error reading key: " + key); - return Status.ERROR; - } - - } - - /** - * Perform a range scan for a set of records in the database. Each field/value - * pair from the result will be stored in a HashMap. - * - * Cassandra CQL uses "token" method for range scan which doesn't always yield - * intuitive results. - * - * @param table - * The name of the table - * @param startkey - * The record key of the first record to read. 
- * @param recordcount - * The number of records to read - * @param fields - * The list of fields to read, or null for all of them - * @param result - * A Vector of HashMaps, where each HashMap is a set field/value - * pairs for one record - * @return Zero on success, a non-zero error code on error - */ - @Override - public Status scan(String table, String startkey, int recordcount, - Set fields, Vector> result) { - - try { - Statement stmt; - Select.Builder selectBuilder; - - if (fields == null) { - selectBuilder = QueryBuilder.select().all(); - } else { - selectBuilder = QueryBuilder.select(); - for (String col : fields) { - ((Select.Selection) selectBuilder).column(col); - } - } - - stmt = selectBuilder.from(table); - - // The statement builder is not setup right for tokens. - // So, we need to build it manually. - String initialStmt = stmt.toString(); - StringBuilder scanStmt = new StringBuilder(); - scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); - scanStmt.append(" WHERE "); - scanStmt.append(QueryBuilder.token(YCSB_KEY)); - scanStmt.append(" >= "); - scanStmt.append("token('"); - scanStmt.append(startkey); - scanStmt.append("')"); - scanStmt.append(" LIMIT "); - scanStmt.append(recordcount); - - stmt = new SimpleStatement(scanStmt.toString()); - stmt.setConsistencyLevel(readConsistencyLevel); - - if (debug) { - System.out.println(stmt.toString()); - } - if (trace) { - stmt.enableTracing(); - } - ResultSet rs = session.execute(stmt); - - HashMap tuple; - while (!rs.isExhausted()) { - Row row = rs.one(); - tuple = new HashMap(); - - ColumnDefinitions cd = row.getColumnDefinitions(); - - for (ColumnDefinitions.Definition def : cd) { - ByteBuffer val = row.getBytesUnsafe(def.getName()); - if (val != null) { - tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); - } else { - tuple.put(def.getName(), null); - } - } - - result.add(tuple); - } - - return Status.OK; - - } catch (Exception e) { - e.printStackTrace(); - 
System.out.println("Error scanning with startkey: " + startkey); - return Status.ERROR; - } - - } - - /** - * Update a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key, overwriting any existing values with the same field name. - * - * @param table - * The name of the table - * @param key - * The record key of the record to write. - * @param values - * A HashMap of field/value pairs to update in the record - * @return Zero on success, a non-zero error code on error - */ - @Override - public Status update(String table, String key, - HashMap values) { - // Insert and updates provide the same functionality - return insert(table, key, values); - } - - /** - * Insert a record in the database. Any field/value pairs in the specified - * values HashMap will be written into the record with the specified record - * key. - * - * @param table - * The name of the table - * @param key - * The record key of the record to insert. - * @param values - * A HashMap of field/value pairs to insert in the record - * @return Zero on success, a non-zero error code on error - */ - @Override - public Status insert(String table, String key, - HashMap values) { - - try { - Insert insertStmt = QueryBuilder.insertInto(table); - - // Add key - insertStmt.value(YCSB_KEY, key); - - // Add fields - for (Map.Entry entry : values.entrySet()) { - Object value; - ByteIterator byteIterator = entry.getValue(); - value = byteIterator.toString(); - - insertStmt.value(entry.getKey(), value); - } - - insertStmt.setConsistencyLevel(writeConsistencyLevel); - - if (debug) { - System.out.println(insertStmt.toString()); - } - if (trace) { - insertStmt.enableTracing(); - } - session.execute(insertStmt); - - return Status.OK; - } catch (Exception e) { - e.printStackTrace(); - } - - return Status.ERROR; - } - - /** - * Delete a record from the database. 
- * - * @param table - * The name of the table - * @param key - * The record key of the record to delete. - * @return Zero on success, a non-zero error code on error - */ - @Override - public Status delete(String table, String key) { - - try { - Statement stmt; - - stmt = QueryBuilder.delete().from(table) - .where(QueryBuilder.eq(YCSB_KEY, key)); - stmt.setConsistencyLevel(writeConsistencyLevel); - - if (debug) { - System.out.println(stmt.toString()); - } - if (trace) { - stmt.enableTracing(); - } - session.execute(stmt); - - return Status.OK; - } catch (Exception e) { - e.printStackTrace(); - System.out.println("Error deleting key: " + key); - } - - return Status.ERROR; - } - -} diff --git a/cassandra2/src/main/java/com/yahoo/ycsb/db/package-info.java b/cassandra2/src/main/java/com/yahoo/ycsb/db/package-info.java deleted file mode 100644 index 007f01dc..00000000 --- a/cassandra2/src/main/java/com/yahoo/ycsb/db/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. - */ - -/** - * The YCSB binding for Cassandra - * 2.1+ via CQL. 
- */ -package com.yahoo.ycsb.db; - diff --git a/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java b/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java deleted file mode 100644 index 60b7e2f3..00000000 --- a/cassandra2/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Copyright (c) 2015 YCSB contributors All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. 
- */ - -package com.yahoo.ycsb.db; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; - -import com.google.common.collect.Sets; - -import com.datastax.driver.core.ResultSet; -import com.datastax.driver.core.Row; -import com.datastax.driver.core.Session; -import com.datastax.driver.core.Statement; -import com.datastax.driver.core.querybuilder.Insert; -import com.datastax.driver.core.querybuilder.QueryBuilder; -import com.datastax.driver.core.querybuilder.Select; -import com.yahoo.ycsb.ByteIterator; -import com.yahoo.ycsb.Status; -import com.yahoo.ycsb.StringByteIterator; -import com.yahoo.ycsb.measurements.Measurements; -import com.yahoo.ycsb.workloads.CoreWorkload; - -import org.cassandraunit.CassandraCQLUnit; -import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; -import org.junit.After; -import org.junit.Before; -import org.junit.ClassRule; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; -import java.util.Set; - -/** - * Integration tests for the Cassandra client - */ -public class CassandraCQLClientTest { - private final static String TABLE = "usertable"; - private final static String HOST = "localhost"; - private final static int PORT = 9142; - private final static String DEFAULT_ROW_KEY = "user1"; - - private CassandraCQLClient client; - private Session session; - - @ClassRule - public static CassandraCQLUnit cassandraUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("ycsb.cql", "ycsb")); - - @Before - public void setUp() throws Exception { - session = cassandraUnit.getSession(); - - Properties p = new Properties(); - p.setProperty("hosts", HOST); - p.setProperty("port", Integer.toString(PORT)); - p.setProperty("table", TABLE); - - Measurements.setProperties(p); - final CoreWorkload workload = new 
CoreWorkload(); - workload.init(p); - client = new CassandraCQLClient(); - client.setProperties(p); - client.init(); - } - - @After - public void tearDownClient() throws Exception { - if (client != null) { - client.cleanup(); - } - client = null; - } - - @After - public void clearTable() throws Exception { - // Clear the table so that each test starts fresh. - final Statement truncate = QueryBuilder.truncate(TABLE); - if (cassandraUnit != null) { - cassandraUnit.getSession().execute(truncate); - } - } - - @Test - public void testReadMissingRow() throws Exception { - final HashMap result = new HashMap(); - final Status status = client.read(TABLE, "Missing row", null, result); - assertThat(result.size(), is(0)); - assertThat(status, is(Status.NOT_FOUND)); - } - - private void insertRow() { - final String rowKey = DEFAULT_ROW_KEY; - Insert insertStmt = QueryBuilder.insertInto(TABLE); - insertStmt.value(CassandraCQLClient.YCSB_KEY, rowKey); - - insertStmt.value("field0", "value1"); - insertStmt.value("field1", "value2"); - session.execute(insertStmt); - } - - @Test - public void testRead() throws Exception { - insertRow(); - - final HashMap result = new HashMap(); - final Status status = client.read(CoreWorkload.table, DEFAULT_ROW_KEY, null, result); - assertThat(status, is(Status.OK)); - assertThat(result.entrySet(), hasSize(11)); - assertThat(result, hasEntry("field2", null)); - - final HashMap strResult = new HashMap(); - for (final Map.Entry e : result.entrySet()) { - if (e.getValue() != null) { - strResult.put(e.getKey(), e.getValue().toString()); - } - } - assertThat(strResult, hasEntry(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY)); - assertThat(strResult, hasEntry("field0", "value1")); - assertThat(strResult, hasEntry("field1", "value2")); - } - - @Test - public void testReadSingleColumn() throws Exception { - insertRow(); - final HashMap result = new HashMap(); - final Set fields = Sets.newHashSet("field1"); - final Status status = 
client.read(CoreWorkload.table, DEFAULT_ROW_KEY, fields, result); - assertThat(status, is(Status.OK)); - assertThat(result.entrySet(), hasSize(1)); - final Map strResult = StringByteIterator.getStringMap(result); - assertThat(strResult, hasEntry("field1", "value2")); - } - - @Test - public void testUpdate() throws Exception { - final String key = "key"; - final HashMap input = new HashMap(); - input.put("field0", "value1"); - input.put("field1", "value2"); - - final Status status = client.insert(TABLE, key, StringByteIterator.getByteIteratorMap(input)); - assertThat(status, is(Status.OK)); - - // Verify result - final Select selectStmt = - QueryBuilder.select("field0", "field1") - .from(TABLE) - .where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, key)) - .limit(1); - - final ResultSet rs = session.execute(selectStmt); - final Row row = rs.one(); - assertThat(row, notNullValue()); - assertThat(rs.isExhausted(), is(true)); - assertThat(row.getString("field0"), is("value1")); - assertThat(row.getString("field1"), is("value2")); - } -} diff --git a/cassandra2/src/test/resources/ycsb.cql b/cassandra2/src/test/resources/ycsb.cql deleted file mode 100644 index c52ab787..00000000 --- a/cassandra2/src/test/resources/ycsb.cql +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (c) 2015 YCSB Contributors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you - * may not use this file except in compliance with the License. You - * may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. See accompanying - * LICENSE file. 
- */ - -CREATE TABLE usertable ( - y_id varchar primary key, - field0 varchar, - field1 varchar, - field2 varchar, - field3 varchar, - field4 varchar, - field5 varchar, - field6 varchar, - field7 varchar, - field8 varchar, - field9 varchar); diff --git a/core/pom.xml b/core/pom.xml index 2dc65019..858daa34 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -1,93 +1,93 @@ 4.0.0 com.yahoo.ycsb root - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT core Core YCSB jar false 1.9.4 org.apache.htrace htrace-core4 4.1.0-incubating org.codehaus.jackson jackson-mapper-asl ${jackson.api.version} org.codehaus.jackson jackson-core-asl ${jackson.api.version} org.testng testng 6.1.1 test org.hdrhistogram HdrHistogram 2.1.4 src/main/resources true org.apache.maven.plugins maven-assembly-plugin ${maven.assembly.version} jar-with-dependencies false package single diff --git a/core/src/main/java/com/yahoo/ycsb/BasicDB.java b/core/src/main/java/com/yahoo/ycsb/BasicDB.java index 83eb5e25..ec12bcda 100644 --- a/core/src/main/java/com/yahoo/ycsb/BasicDB.java +++ b/core/src/main/java/com/yahoo/ycsb/BasicDB.java @@ -1,277 +1,300 @@ /** * Copyright (c) 2010 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; /** * Basic DB that just prints out the requested operations, instead of doing them against a database. 
*/ public class BasicDB extends DB { public static final String VERBOSE="basicdb.verbose"; public static final String VERBOSE_DEFAULT="true"; public static final String SIMULATE_DELAY="basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT="0"; public static final String RANDOMIZE_DELAY="basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT="true"; boolean verbose; boolean randomizedelay; int todelay; public BasicDB() { todelay=0; } void delay() { if (todelay>0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(Utils.random().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } long now = System.nanoTime(); final long deadline = now + delayNs; do { LockSupport.parkNanos(deadline - now); } while ((now = System.nanoTime()) < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. 
*/ @SuppressWarnings("unchecked") public void init() { verbose=Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay=Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay=Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); - if (verbose) + if (verbose) synchronized(System.out) { System.out.println("***************** properties *****************"); Properties p=getProperties(); if (p!=null) { for (Enumeration e=p.propertyNames(); e.hasMoreElements(); ) { String k=(String)e.nextElement(); System.out.println("\""+k+"\"=\""+p.getProperty(k)+"\""); } } System.out.println("**********************************************"); } } + static final ThreadLocal TL_STRING_BUILDER = new ThreadLocal() { + @Override + protected StringBuilder initialValue() { + return new StringBuilder(); + } + }; + + static StringBuilder getStringBuilder() { + StringBuilder sb = TL_STRING_BUILDER.get(); + sb.setLength(0); + return sb; + } + /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set fields, HashMap result) { delay(); if (verbose) { - System.out.print("READ "+table+" "+key+" [ "); + StringBuilder sb = getStringBuilder(); + sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields!=null) { for (String f : fields) { - System.out.print(f+" "); + sb.append(f).append(" "); } } else { - System.out.print(""); + sb.append(""); } - System.out.println("]"); - } + sb.append("]"); + System.out.println(sb); + } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { delay(); if (verbose) { - System.out.print("SCAN "+table+" "+startkey+" "+recordcount+" [ "); + StringBuilder sb = getStringBuilder(); + sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields!=null) { for (String f : fields) { - System.out.print(f+" "); + sb.append(f).append(" "); } } else { - System.out.print(""); + sb.append(""); } - System.out.println("]"); - } + sb.append("]"); + System.out.println(sb); + } return Status.OK; } /** * Update a record in the database. 
Any field/value pairs in the specified values HashMap will be written into the record with the specified * record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, HashMap values) { delay(); if (verbose) { - System.out.print("UPDATE "+table+" "+key+" [ "); + StringBuilder sb = getStringBuilder(); + sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values!=null) { for (Map.Entry entry : values.entrySet()) { - System.out.print(entry.getKey() +"="+ entry.getValue() +" "); + sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } - System.out.println("]"); + sb.append("]"); + System.out.println(sb); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the record with the specified * record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, HashMap values) { delay(); if (verbose) { - System.out.print("INSERT "+table+" "+key+" [ "); + StringBuilder sb = getStringBuilder(); + sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values!=null) { for (Map.Entry entry : values.entrySet()) { - System.out.print(entry.getKey() +"="+ entry.getValue() +" "); + sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } - System.out.println("]"); + sb.append("]"); + System.out.println(sb); } return Status.OK; } /** * Delete a record from the database. 
* * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { - System.out.println("DELETE "+table+" "+key); + StringBuilder sb = getStringBuilder(); + sb.append("DELETE ").append(table).append(" ").append(key); + System.out.println(sb); } return Status.OK; } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb=new BasicDB(); Properties p=new Properties(); p.setProperty("Sky","Blue"); p.setProperty("Ocean","Wet"); bdb.setProperties(p); bdb.init(); HashMap fields=new HashMap(); fields.put("A","X"); fields.put("B","Y"); bdb.read("table","key",null,null); bdb.insert("table","key",fields); fields=new HashMap(); fields.put("C","Z"); bdb.update("table","key",fields); bdb.delete("table","key"); }*/ } diff --git a/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java b/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java index cb0977aa..b080ec56 100644 --- a/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java @@ -1,87 +1,96 @@ /** * Copyright (c) 2010 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; /** * A ByteIterator that generates a random sequence of bytes. 
*/ public class RandomByteIterator extends ByteIterator { private long len; private long off; private int bufOff; private byte[] buf; @Override public boolean hasNext() { return (off + bufOff) < len; } private void fillBytesImpl(byte[] buffer, int base) { int bytes = Utils.random().nextInt(); - try { - buffer[base+0] = (byte)(((bytes) & 31) + ' '); - buffer[base+1] = (byte)(((bytes >> 5) & 63) + ' '); - buffer[base+2] = (byte)(((bytes >> 10) & 95) + ' '); - buffer[base+3] = (byte)(((bytes >> 15) & 31) + ' '); - buffer[base+4] = (byte)(((bytes >> 20) & 63) + ' '); - buffer[base+5] = (byte)(((bytes >> 25) & 95) + ' '); - } catch (ArrayIndexOutOfBoundsException e) { /* ignore it */ } + + switch(buffer.length - base) { + default: + buffer[base+5] = (byte)(((bytes >> 25) & 95) + ' '); + case 5: + buffer[base+4] = (byte)(((bytes >> 20) & 63) + ' '); + case 4: + buffer[base+3] = (byte)(((bytes >> 15) & 31) + ' '); + case 3: + buffer[base+2] = (byte)(((bytes >> 10) & 95) + ' '); + case 2: + buffer[base+1] = (byte)(((bytes >> 5) & 63) + ' '); + case 1: + buffer[base+0] = (byte)(((bytes) & 31) + ' '); + case 0: + break; + } } private void fillBytes() { if(bufOff == buf.length) { fillBytesImpl(buf, 0); bufOff = 0; off += buf.length; } } public RandomByteIterator(long len) { this.len = len; this.buf = new byte[6]; this.bufOff = buf.length; fillBytes(); this.off = 0; } public byte nextByte() { fillBytes(); bufOff++; return buf[bufOff-1]; } @Override public int nextBuf(byte[] buffer, int bufferOffset) { int ret; if(len - off < buffer.length - bufferOffset) { ret = (int)(len - off); } else { ret = buffer.length - bufferOffset; } int i; for(i = 0; i < ret; i+=6) { fillBytesImpl(buffer, i + bufferOffset); } off+=ret; return ret + bufferOffset; } @Override public long bytesLeft() { return len - off - bufOff; } } diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java b/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java index 26d340cc..f47ecd52 
100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/Measurements.java @@ -1,314 +1,314 @@ /** * Copyright (c) 2010 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.measurements; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.measurements.exporter.MeasurementsExporter; import java.io.IOException; import java.util.Properties; import java.util.concurrent.ConcurrentHashMap; /** * Collects latency measurements, and reports them when requested. * * @author cooperb * */ public class Measurements { /** * All supported measurement types are defined in this enum. 
* */ public enum MeasurementType { HISTOGRAM, HDRHISTOGRAM, HDRHISTOGRAM_AND_HISTOGRAM, HDRHISTOGRAM_AND_RAW, TIMESERIES, RAW } public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype"; private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram"; public static final String MEASUREMENT_INTERVAL = "measurement.interval"; private static final String MEASUREMENT_INTERVAL_DEFAULT = "op"; public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm"; public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false"; static Measurements singleton=null; static Properties measurementproperties=null; public static void setProperties(Properties props) { measurementproperties=props; } /** * Return the singleton Measurements object. */ public synchronized static Measurements getMeasurements() { if (singleton==null) { singleton=new Measurements(measurementproperties); } return singleton; } final ConcurrentHashMap _opToMesurementMap; final ConcurrentHashMap _opToIntendedMesurementMap; final MeasurementType _measurementType; final int _measurementInterval; private Properties _props; /** * Create a new object with the specified properties. 
*/ public Measurements(Properties props) { _opToMesurementMap=new ConcurrentHashMap(); _opToIntendedMesurementMap=new ConcurrentHashMap(); _props=props; String mTypeString = _props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT); if (mTypeString.equals("histogram")) { _measurementType = MeasurementType.HISTOGRAM; } else if (mTypeString.equals("hdrhistogram")) { _measurementType = MeasurementType.HDRHISTOGRAM; } else if (mTypeString.equals("hdrhistogram+histogram")) { _measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM; } else if (mTypeString.equals("hdrhistogram+raw")) { _measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW; } else if (mTypeString.equals("timeseries")) { _measurementType = MeasurementType.TIMESERIES; } else if (mTypeString.equals("raw")) { _measurementType = MeasurementType.RAW; } else { throw new IllegalArgumentException("unknown "+MEASUREMENT_TYPE_PROPERTY+"="+mTypeString); } String mIntervalString = _props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT); if (mIntervalString.equals("op")) { _measurementInterval = 0; } else if (mIntervalString.equals("intended")) { _measurementInterval = 1; } else if (mIntervalString.equals("both")) { _measurementInterval = 2; } else { throw new IllegalArgumentException("unknown "+MEASUREMENT_INTERVAL+"="+mIntervalString); } } OneMeasurement constructOneMeasurement(String name) { switch (_measurementType) { case HISTOGRAM: return new OneMeasurementHistogram(name, _props); case HDRHISTOGRAM: return new OneMeasurementHdrHistogram(name, _props); case HDRHISTOGRAM_AND_HISTOGRAM: return new TwoInOneMeasurement(name, new OneMeasurementHdrHistogram("Hdr"+name, _props), new OneMeasurementHistogram("Bucket"+name, _props)); case HDRHISTOGRAM_AND_RAW: return new TwoInOneMeasurement(name, new OneMeasurementHdrHistogram("Hdr"+name, _props), - new OneMeasurementHistogram("Raw"+name, _props)); + new OneMeasurementRaw("Raw"+name, _props)); case TIMESERIES: return new 
OneMeasurementTimeSeries(name, _props); case RAW: return new OneMeasurementRaw(name, _props); default: throw new AssertionError("Impossible to be here. Dead code reached. Bugs?"); } } static class StartTimeHolder { long time; long startTime(){ if(time == 0) { return System.nanoTime(); } else { return time; } } } ThreadLocal tlIntendedStartTime = new ThreadLocal() { protected StartTimeHolder initialValue() { return new StartTimeHolder(); } }; public void setIntendedStartTimeNs(long time) { if(_measurementInterval==0) return; tlIntendedStartTime.get().time=time; } public long getIntendedtartTimeNs() { if(_measurementInterval==0) return 0L; return tlIntendedStartTime.get().startTime(); } /** * Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured * value. */ public void measure(String operation, int latency) { if(_measurementInterval==1) return; try { OneMeasurement m = getOpMeasurement(operation); m.measure(latency); } // This seems like a terribly hacky way to cover up for a bug in the measurement code catch (java.lang.ArrayIndexOutOfBoundsException e) { System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing"); e.printStackTrace(); e.printStackTrace(System.out); } } /** * Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured * value. 
*/ public void measureIntended(String operation, int latency) { if(_measurementInterval==0) return; try { OneMeasurement m = getOpIntendedMeasurement(operation); m.measure(latency); } // This seems like a terribly hacky way to cover up for a bug in the measurement code catch (java.lang.ArrayIndexOutOfBoundsException e) { System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing"); e.printStackTrace(); e.printStackTrace(System.out); } } private OneMeasurement getOpMeasurement(String operation) { OneMeasurement m = _opToMesurementMap.get(operation); if(m == null) { m = constructOneMeasurement(operation); OneMeasurement oldM = _opToMesurementMap.putIfAbsent(operation, m); if(oldM != null) { m = oldM; } } return m; } private OneMeasurement getOpIntendedMeasurement(String operation) { OneMeasurement m = _opToIntendedMesurementMap.get(operation); if(m == null) { final String name = _measurementInterval==1 ? operation : "Intended-" + operation; m = constructOneMeasurement(name); OneMeasurement oldM = _opToIntendedMesurementMap.putIfAbsent(operation, m); if(oldM != null) { m = oldM; } } return m; } /** * Report a return code for a single DB operation. */ public void reportStatus(final String operation, final Status status) { OneMeasurement m = _measurementInterval==1 ? getOpIntendedMeasurement(operation) : getOpMeasurement(operation); m.reportStatus(status); } /** * Export the current measurements to a suitable format. * * @param exporter Exporter representing the type of format to write to. * @throws IOException Thrown if the export failed. */ public void exportMeasurements(MeasurementsExporter exporter) throws IOException { for (OneMeasurement measurement : _opToMesurementMap.values()) { measurement.exportMeasurements(exporter); } for (OneMeasurement measurement : _opToIntendedMesurementMap.values()) { measurement.exportMeasurements(exporter); } } /** * Return a one line summary of the measurements. 
*/ public synchronized String getSummary() { String ret=""; for (OneMeasurement m : _opToMesurementMap.values()) { ret += m.getSummary()+" "; } for (OneMeasurement m : _opToIntendedMesurementMap.values()) { ret += m.getSummary()+" "; } return ret; } } diff --git a/couchbase/pom.xml b/couchbase/pom.xml index b321d27c..159e90b2 100644 --- a/couchbase/pom.xml +++ b/couchbase/pom.xml @@ -1,55 +1,55 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent couchbase-binding Couchbase Binding jar com.couchbase.client couchbase-client ${couchbase.version} com.yahoo.ycsb core ${project.version} provided com.fasterxml.jackson.core jackson-databind 2.2.2 org.slf4j slf4j-api diff --git a/couchbase2/README.md b/couchbase2/README.md index 786060da..7986811f 100644 --- a/couchbase2/README.md +++ b/couchbase2/README.md @@ -1,137 +1,145 @@ # Couchbase (SDK 2.x) Driver for YCSB This driver is a binding for the YCSB facilities to operate against a Couchbase Server cluster. It uses the official Couchbase Java SDK (version 2.x) and provides a rich set of configuration options, including support for the N1QL query language. ## Quickstart ### 1. Start Couchbase Server You need to start a single node or a cluster to point the client at. Please see [http://couchbase.com](couchbase.com) for more details and instructions. ### 2. Set up YCSB You can either download the release zip and run it, or just clone from master. ``` git clone git://github.com/brianfrankcooper/YCSB.git cd YCSB mvn clean package ``` ### 3. Run the Workload Before you can actually run the workload, you need to "load" the data first. ``` bin/ycsb load couchbase2 -s -P workloads/workloada ``` Then, you can run the workload: ``` bin/ycsb run couchbase2 -s -P workloads/workloada ``` Please see the general instructions in the `doc` folder if you are not sure how it all works. 
You can apply a property (as seen in the next section) like this: ``` bin/ycsb run couchbase -s -P workloads/workloada -p couchbase.epoll=true ``` ## N1QL Index Setup In general, every time N1QL is used (either implicitly through using `workloade` or through setting `kv=false`) some kind of index must be present to make it work. Depending on the workload and data size, choosing the right index is crucial at runtime in order to get the best performance. If in doubt, please ask at the [forums](http://forums.couchbase.com) or get in touch with our team at Couchbase. For `workloade` and the default `readallfields=true` we recommend creating the following index, and if using Couchbase Server 4.5 or later with the "Memory Optimized Index" setting on the bucket. ``` -CREATE INDEX wle_idx ON `bucketname`(meta().id); +CREATE PRIMARY INDEX ON `bucketname`; +``` + +Couchbase Server prior to 4.5 may need a slightly different index to deliver the best performance. In those releases +additional covering information may be added to the index with this form. + +``` +-CREATE INDEX wle_idx ON `bucketname`(meta().id); ``` For other workloads, different index setups might be even more performant. ## Performance Considerations As it is with any benchmark, there are lot of knobs to tune in order to get great or (if you are reading this and trying to write a competitor benchmark ;-)) bad performance. The first setting you should consider, if you are running on Linux 64bit is setting `-p couchbase.epoll=true`. This will then turn on the Epoll IO mechanisms in the underlying Netty library which provides better performance since it has less synchronization to do than the NIO default. This only works on Linux, but you are benchmarking on the OS you are deploying to, right? The second option, `boost`, sounds more magic than it actually is. By default this benchmark trades CPU for throughput, but this can be disabled by setting `-p couchbase.boost=0`. 
This defaults to 3, and 3 is the number of event loops run in the IO layer. 3 is a reasonable default but you should set it to the number of **physical** cores you have available on the machine if you only plan to run one YCSB instance. Make sure (using profiling) to max out your cores, but don't overdo it. ## Sync vs Async By default, since YCSB is sync the code will always wait for the operation to complete. In some cases it can be useful to just "drive load" and disable the waiting. Note that when the "-p couchbase.syncMutationResponse=false" option is used, the measured results by YCSB can basically be thrown away. Still helpful sometimes during load phases to speed them up :) ## Debugging Latency The Couchbase Java SDK has the ability to collect and dump different kinds of metrics which allow you to analyze performance during benchmarking and production. By default this option is disabled in the benchmark, but by setting `couchbase.networkMetricsInterval` and/or `couchbase.runtimeMetricsInterval` to something greater than 0 it will output the information as JSON into the configured logger. The number provides is the interval in seconds. If you are unsure what interval to pick, start with 10 or 30 seconds, depending on your runtime length. 
This is how such logs look like: ``` INFO: {"heap.used":{"init":268435456,"used":36500912,"committed":232259584,"max":3817865216},"gc.ps marksweep.collectionTime":0,"gc.ps scavenge.collectionTime":54,"gc.ps scavenge.collectionCount":17,"thread.count":26,"offHeap.used":{"init":2555904,"used":30865944,"committed":31719424,"max":-1},"gc.ps marksweep.collectionCount":0,"heap.pendingFinalize":0,"thread.peakCount":26,"event":{"name":"RuntimeMetrics","type":"METRIC"},"thread.startedCount":28} INFO: {"localhost/127.0.0.1:11210":{"BINARY":{"ReplaceRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":102,"90.0":136,"95.0":155,"99.0":244,"99.9":428},"min":55,"max":1564,"count":35787,"timeUnit":"MICROSECONDS"}}},"GetRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":74,"90.0":98,"95.0":110,"99.0":158,"99.9":358},"min":34,"max":2310,"count":35604,"timeUnit":"MICROSECONDS"}}},"GetBucketConfigRequest":{"SUCCESS":{"metrics":{"percentiles":{"50.0":462,"90.0":462,"95.0":462,"99.0":462,"99.9":462},"min":460,"max":462,"count":1,"timeUnit":"MICROSECONDS"}}}}},"event":{"name":"NetworkLatencyMetrics","type":"METRIC"}} ``` It is recommended to either feed it into a program which can analyze and visualize JSON or just dump it into a JSON pretty printer and look at it manually. Since the output can be changed (only by changing the code at the moment), you can even configure to put those messages into another couchbase bucket and then analyze it through N1QL! You can learn more about this in general [in the official docs](http://developer.couchbase.com/documentation/server/4.0/sdks/java-2.2/event-bus-metrics.html). ## Configuration Options Since no setup is the same and the goal of YCSB is to deliver realistic benchmarks, here are some setups that you can tune. Note that if you need more flexibility (let's say a custom transcoder), you still need to extend this driver and implement the facilities on your own. 
You can set the following properties (with the default settings applied): - couchbase.host=127.0.0.1: The hostname from one server. - couchbase.bucket=default: The bucket name to use. - couchbase.password=: The password of the bucket. - couchbase.syncMutationResponse=true: If mutations should wait for the response to complete. - couchbase.persistTo=0: Persistence durability requirement - couchbase.replicateTo=0: Replication durability requirement - couchbase.upsert=false: Use upsert instead of insert or replace. - couchbase.adhoc=false: If set to true, prepared statements are not used. - couchbase.kv=true: If set to false, mutation operations will also be performed through N1QL. - couchbase.maxParallelism=1: The server parallelism for all n1ql queries. - couchbase.kvEndpoints=1: The number of KV sockets to open per server. - couchbase.queryEndpoints=5: The number of N1QL Query sockets to open per server. - couchbase.epoll=false: If Epoll instead of NIO should be used (only available for linux. - couchbase.boost=3: If > 0 trades CPU for higher throughput. N is the number of event loops, ideally set to the number of physical cores. Setting higher than that will likely degrade performance. - couchbase.networkMetricsInterval=0: The interval in seconds when latency metrics will be logged. - - couchbase.runtimeMetricsInterval=0: The interval in seconds when runtime metrics will be logged. \ No newline at end of file + - couchbase.runtimeMetricsInterval=0: The interval in seconds when runtime metrics will be logged. + - couchbase.documentExpiry=0: Document Expiry is the amount of time(second) until a document expires in Couchbase. 
\ No newline at end of file diff --git a/couchbase2/pom.xml b/couchbase2/pom.xml index 9efdf01a..86dde28e 100644 --- a/couchbase2/pom.xml +++ b/couchbase2/pom.xml @@ -1,48 +1,48 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent couchbase2-binding Couchbase Java SDK 2.x Binding jar com.couchbase.client java-client ${couchbase2.version} com.yahoo.ycsb core ${project.version} provided diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java index 3d0bc039..7acd9bbb 100644 --- a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java +++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java @@ -1,939 +1,940 @@ /** * Copyright (c) 2016 Yahoo! Inc. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.couchbase2; import com.couchbase.client.core.env.DefaultCoreEnvironment; import com.couchbase.client.core.env.resources.IoPoolShutdownHook; import com.couchbase.client.core.logging.CouchbaseLogger; import com.couchbase.client.core.logging.CouchbaseLoggerFactory; import com.couchbase.client.core.metrics.DefaultLatencyMetricsCollectorConfig; import com.couchbase.client.core.metrics.DefaultMetricsCollectorConfig; import com.couchbase.client.core.metrics.LatencyMetricsCollectorConfig; import com.couchbase.client.core.metrics.MetricsCollectorConfig; import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonFactory; import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonGenerator; import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode; import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode; import com.couchbase.client.deps.io.netty.channel.DefaultSelectStrategyFactory; import com.couchbase.client.deps.io.netty.channel.EventLoopGroup; import com.couchbase.client.deps.io.netty.channel.SelectStrategy; import com.couchbase.client.deps.io.netty.channel.SelectStrategyFactory; import com.couchbase.client.deps.io.netty.channel.epoll.EpollEventLoopGroup; import com.couchbase.client.deps.io.netty.channel.nio.NioEventLoopGroup; import com.couchbase.client.deps.io.netty.util.IntSupplier; import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory; import com.couchbase.client.java.Bucket; import com.couchbase.client.java.Cluster; import com.couchbase.client.java.CouchbaseCluster; import com.couchbase.client.java.PersistTo; import 
com.couchbase.client.java.ReplicateTo; import com.couchbase.client.java.document.Document; import com.couchbase.client.java.document.RawJsonDocument; import com.couchbase.client.java.document.json.JsonArray; import com.couchbase.client.java.document.json.JsonObject; import com.couchbase.client.java.env.CouchbaseEnvironment; import com.couchbase.client.java.env.DefaultCouchbaseEnvironment; import com.couchbase.client.java.error.TemporaryFailureException; import com.couchbase.client.java.query.*; import com.couchbase.client.java.transcoder.JacksonTransformers; import com.couchbase.client.java.util.Blocking; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import rx.Observable; import rx.Subscriber; import rx.functions.Action1; import rx.functions.Func1; import java.io.StringWriter; import java.io.Writer; import java.nio.channels.spi.SelectorProvider; import java.util.*; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; /** * A class that wraps the 2.x Couchbase SDK to be used with YCSB. * *

The following options can be passed when using this database client to override the defaults. * *

    *
  • couchbase.host=127.0.0.1 The hostname from one server.
  • *
  • couchbase.bucket=default The bucket name to use.
  • *
  • couchbase.password= The password of the bucket.
  • *
  • couchbase.syncMutationResponse=true If mutations should wait for the response to complete.
  • *
  • couchbase.persistTo=0 Persistence durability requirement
  • *
  • couchbase.replicateTo=0 Replication durability requirement
  • *
  • couchbase.upsert=false Use upsert instead of insert or replace.
  • *
  • couchbase.adhoc=false If set to true, prepared statements are not used.
  • *
  • couchbase.kv=true If set to false, mutation operations will also be performed through N1QL.
  • *
  • couchbase.maxParallelism=1 The server parallelism for all n1ql queries.
  • *
  • couchbase.kvEndpoints=1 The number of KV sockets to open per server.
  • *
  • couchbase.queryEndpoints=5 The number of N1QL Query sockets to open per server.
  • *
  • couchbase.epoll=false If Epoll instead of NIO should be used (only available for linux.
  • *
  • couchbase.boost=3 If > 0 trades CPU for higher throughput. N is the number of event loops, ideally * set to the number of physical cores. Setting higher than that will likely degrade performance.
  • *
  • couchbase.networkMetricsInterval=0 The interval in seconds when latency metrics will be logged.
  • *
  • couchbase.runtimeMetricsInterval=0 The interval in seconds when runtime metrics will be logged.
  • + *
  • couchbase.documentExpiry=0 Document Expiry is the amount of time until a document expires in + * Couchbase.
  • *
*/ public class Couchbase2Client extends DB { static { // No need to send the full encoded_plan for this benchmark workload, less network overhead! System.setProperty("com.couchbase.query.encodedPlanEnabled", "false"); } private static final String SEPARATOR = ":"; private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(Couchbase2Client.class); private static final Object INIT_COORDINATOR = new Object(); private static volatile CouchbaseEnvironment env = null; private Cluster cluster; private Bucket bucket; private String bucketName; private boolean upsert; private PersistTo persistTo; private ReplicateTo replicateTo; private boolean syncMutResponse; private boolean epoll; private long kvTimeout; private boolean adhoc; private boolean kv; private int maxParallelism; private String host; private int kvEndpoints; private int queryEndpoints; private int boost; private int networkMetricsInterval; private int runtimeMetricsInterval; private String scanAllQuery; - + private int documentExpiry; + @Override public void init() throws DBException { Properties props = getProperties(); host = props.getProperty("couchbase.host", "127.0.0.1"); bucketName = props.getProperty("couchbase.bucket", "default"); String bucketPassword = props.getProperty("couchbase.password", ""); upsert = props.getProperty("couchbase.upsert", "false").equals("true"); persistTo = parsePersistTo(props.getProperty("couchbase.persistTo", "0")); replicateTo = parseReplicateTo(props.getProperty("couchbase.replicateTo", "0")); syncMutResponse = props.getProperty("couchbase.syncMutationResponse", "true").equals("true"); adhoc = props.getProperty("couchbase.adhoc", "false").equals("true"); kv = props.getProperty("couchbase.kv", "true").equals("true"); maxParallelism = Integer.parseInt(props.getProperty("couchbase.maxParallelism", "1")); kvEndpoints = Integer.parseInt(props.getProperty("couchbase.kvEndpoints", "1")); queryEndpoints = 
Integer.parseInt(props.getProperty("couchbase.queryEndpoints", "1")); epoll = props.getProperty("couchbase.epoll", "false").equals("true"); boost = Integer.parseInt(props.getProperty("couchbase.boost", "3")); networkMetricsInterval = Integer.parseInt(props.getProperty("couchbase.networkMetricsInterval", "0")); runtimeMetricsInterval = Integer.parseInt(props.getProperty("couchbase.runtimeMetricsInterval", "0")); - scanAllQuery = "SELECT meta().id as id FROM `" + bucketName + "` WHERE meta().id >= '$1' LIMIT $2"; + documentExpiry = Integer.parseInt(props.getProperty("couchbase.documentExpiry", "0")); + scanAllQuery = "SELECT RAW meta().id FROM `" + bucketName + + "` WHERE meta().id >= '$1' ORDER BY meta().id LIMIT $2"; try { synchronized (INIT_COORDINATOR) { if (env == null) { LatencyMetricsCollectorConfig latencyConfig = networkMetricsInterval <= 0 ? DefaultLatencyMetricsCollectorConfig.disabled() : DefaultLatencyMetricsCollectorConfig .builder() .emitFrequency(networkMetricsInterval) .emitFrequencyUnit(TimeUnit.SECONDS) .build(); MetricsCollectorConfig runtimeConfig = runtimeMetricsInterval <= 0 ? DefaultMetricsCollectorConfig.disabled() : DefaultMetricsCollectorConfig.create(runtimeMetricsInterval, TimeUnit.SECONDS); DefaultCouchbaseEnvironment.Builder builder = DefaultCouchbaseEnvironment .builder() .queryEndpoints(queryEndpoints) .callbacksOnIoPool(true) .runtimeMetricsCollectorConfig(runtimeConfig) .networkLatencyMetricsCollectorConfig(latencyConfig) .socketConnectTimeout(10000) // 10 secs socket connect timeout .connectTimeout(30000) // 30 secs overall bucket open timeout .kvTimeout(10000) // 10 instead of 2.5s for KV ops .kvEndpoints(kvEndpoints); // Tune boosting and epoll based on settings SelectStrategyFactory factory = boost > 0 ? new BackoffSelectStrategyFactory() : DefaultSelectStrategyFactory.INSTANCE; int poolSize = boost > 0 ? 
boost : Integer.parseInt( System.getProperty("com.couchbase.ioPoolSize", Integer.toString(DefaultCoreEnvironment.IO_POOL_SIZE)) ); ThreadFactory threadFactory = new DefaultThreadFactory("cb-io", true); EventLoopGroup group = epoll ? new EpollEventLoopGroup(poolSize, threadFactory, factory) : new NioEventLoopGroup(poolSize, threadFactory, SelectorProvider.provider(), factory); builder.ioPool(group, new IoPoolShutdownHook(group)); env = builder.build(); logParams(); } } cluster = CouchbaseCluster.create(env, host); bucket = cluster.openBucket(bucketName, bucketPassword); kvTimeout = env.kvTimeout(); } catch (Exception ex) { throw new DBException("Could not connect to Couchbase Bucket.", ex); } if (!kv && !syncMutResponse) { throw new DBException("Not waiting for N1QL responses on mutations not yet implemented."); } } /** * Helper method to log the CLI params so that on the command line debugging is easier. */ private void logParams() { StringBuilder sb = new StringBuilder(); sb.append("host=").append(host); sb.append(", bucket=").append(bucketName); sb.append(", upsert=").append(upsert); sb.append(", persistTo=").append(persistTo); sb.append(", replicateTo=").append(replicateTo); sb.append(", syncMutResponse=").append(syncMutResponse); sb.append(", adhoc=").append(adhoc); sb.append(", kv=").append(kv); sb.append(", maxParallelism=").append(maxParallelism); sb.append(", queryEndpoints=").append(queryEndpoints); sb.append(", kvEndpoints=").append(kvEndpoints); sb.append(", queryEndpoints=").append(queryEndpoints); sb.append(", epoll=").append(epoll); sb.append(", boost=").append(boost); sb.append(", networkMetricsInterval=").append(networkMetricsInterval); sb.append(", runtimeMetricsInterval=").append(runtimeMetricsInterval); LOGGER.info("===> Using Params: " + sb.toString()); } @Override public Status read(final String table, final String key, Set fields, final HashMap result) { try { String docId = formatId(table, key); if (kv) { return readKv(docId, fields, result); 
} else { return readN1ql(docId, fields, result); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #read(String, String, Set, HashMap)} operation via Key/Value ("get"). * * @param docId the document ID * @param fields the fields to be loaded * @param result the result map where the doc needs to be converted into * @return The result of the operation. */ private Status readKv(final String docId, final Set fields, final HashMap result) throws Exception { RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class); if (loaded == null) { return Status.NOT_FOUND; } decode(loaded.content(), fields, result); return Status.OK; } /** * Performs the {@link #read(String, String, Set, HashMap)} operation via N1QL ("SELECT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param fields the fields to be loaded * @param result the result map where the doc needs to be converted into * @return The result of the operation. */ private Status readN1ql(final String docId, Set fields, final HashMap result) throws Exception { String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( readQuery, JsonArray.from(docId), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. 
Query: " + readQuery + ", Errors: " + queryResult.errors()); } N1qlQueryRow row; try { row = queryResult.rows().next(); } catch (NoSuchElementException ex) { return Status.NOT_FOUND; } JsonObject content = row.value(); if (fields == null) { content = content.getObject(bucketName); // n1ql result set scoped under *.bucketName fields = content.getNames(); } for (String field : fields) { Object value = content.get(field); result.put(field, new StringByteIterator(value != null ? value.toString() : "")); } return Status.OK; } @Override public Status update(final String table, final String key, final HashMap values) { if (upsert) { return upsert(table, key, values); } try { String docId = formatId(table, key); if (kv) { return updateKv(docId, values); } else { return updateN1ql(docId, values); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #update(String, String, HashMap)} operation via Key/Value ("replace"). * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ private Status updateKv(final String docId, final HashMap values) { waitForMutationResponse(bucket.async().replace( - RawJsonDocument.create(docId, encode(values)), + RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, replicateTo )); return Status.OK; } /** * Performs the {@link #update(String, String, HashMap)} operation via N1QL ("UPDATE"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. 
*/ private Status updateN1ql(final String docId, final HashMap values) throws Exception { String fields = encodeN1qlFields(values); String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( updateQuery, JsonArray.from(docId), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + updateQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } @Override public Status insert(final String table, final String key, final HashMap values) { if (upsert) { return upsert(table, key, values); } try { String docId = formatId(table, key); if (kv) { return insertKv(docId, values); } else { return insertN1ql(docId, values); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #insert(String, String, HashMap)} operation via Key/Value ("INSERT"). * * Note that during the "load" phase it makes sense to retry TMPFAILS (so that even if the server is * overloaded temporarily the ops will succeed eventually). The current code will retry TMPFAILs * for maximum of one minute and then bubble up the error. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ private Status insertKv(final String docId, final HashMap values) { int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate. 
for(int i = 0; i < tries; i++) { try { waitForMutationResponse(bucket.async().insert( - RawJsonDocument.create(docId, encode(values)), + RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, replicateTo )); return Status.OK; } catch (TemporaryFailureException ex) { try { Thread.sleep(1000); } catch (InterruptedException e) { throw new RuntimeException("Interrupted while sleeping on TMPFAIL backoff.", ex); } } } throw new RuntimeException("Still receiving TMPFAIL from the server after trying " + tries + " times. " + "Check your server."); } /** * Performs the {@link #insert(String, String, HashMap)} operation via N1QL ("INSERT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ private Status insertN1ql(final String docId, final HashMap values) throws Exception { String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( insertQuery, JsonArray.from(docId, valuesToJsonObject(values)), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + insertQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } /** * Performs an upsert instead of insert or update using either Key/Value or N1QL. * * If this option should be used, the "-p couchbase.upsert=true" property must be set. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return The result of the operation. 
*/ private Status upsert(final String table, final String key, final HashMap values) { try { String docId = formatId(table, key); if (kv) { return upsertKv(docId, values); } else { return upsertN1ql(docId, values); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #upsert(String, String, HashMap)} operation via Key/Value ("upsert"). * * If this option should be used, the "-p couchbase.upsert=true" property must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ private Status upsertKv(final String docId, final HashMap values) { waitForMutationResponse(bucket.async().upsert( - RawJsonDocument.create(docId, encode(values)), + RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, replicateTo )); return Status.OK; } /** * Performs the {@link #upsert(String, String, HashMap)} operation via N1QL ("UPSERT"). * * If this option should be used, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ private Status upsertN1ql(final String docId, final HashMap values) throws Exception { String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( upsertQuery, JsonArray.from(docId, valuesToJsonObject(values)), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. 
Query: " + upsertQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } @Override public Status delete(final String table, final String key) { try { String docId = formatId(table, key); if (kv) { return deleteKv(docId); } else { return deleteN1ql(docId); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #delete(String, String)} (String, String)} operation via Key/Value ("remove"). * * @param docId the document ID. * @return The result of the operation. */ private Status deleteKv(final String docId) { waitForMutationResponse(bucket.async().remove( docId, persistTo, replicateTo )); return Status.OK; } /** * Performs the {@link #delete(String, String)} (String, String)} operation via N1QL ("DELETE"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID. * @return The result of the operation. */ private Status deleteN1ql(final String docId) throws Exception { String deleteQuery = "DELETE FROM `" + bucketName + "` USE KEYS [$1]"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( deleteQuery, JsonArray.from(docId), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + deleteQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } @Override public Status scan(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { try { if (fields == null || fields.isEmpty()) { return scanAllFields(table, startkey, recordcount, result); } else { return scanSpecificFields(table, startkey, recordcount, fields, result); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** * Performs the {@link #scan(String, String, int, Set, Vector)} operation, optimized for all fields. 
* * Since the full document bodies need to be loaded anyways, it makes sense to just grab the document IDs * from N1QL and then perform the bulk loading via KV for better performance. This is a usual pattern with * Couchbase and shows the benefits of using both N1QL and KV together. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return The result of the operation. */ private Status scanAllFields(final String table, final String startkey, final int recordcount, final Vector> result) { final List> data = new ArrayList>(recordcount); - bucket.async() .query(N1qlQuery.parameterized( scanAllQuery, JsonArray.from(formatId(table, startkey), recordcount), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )) .doOnNext(new Action1() { @Override public void call(AsyncN1qlQueryResult result) { if (!result.parseSuccess()) { throw new RuntimeException("Error while parsing N1QL Result. 
Query: " + scanAllQuery + ", Errors: " + result.errors()); } } }) .flatMap(new Func1>() { @Override public Observable call(AsyncN1qlQueryResult result) { return result.rows(); } }) .flatMap(new Func1>() { @Override public Observable call(AsyncN1qlQueryRow row) { - String id = new String(row.byteValue()); - return bucket.async().get( - id.substring(id.indexOf(table + SEPARATOR), id.lastIndexOf('"')), - RawJsonDocument.class - ); + String id = new String(row.byteValue()).trim(); + return bucket.async().get(id.substring(1, id.length()-1), RawJsonDocument.class); } }) .map(new Func1>() { @Override public HashMap call(RawJsonDocument document) { HashMap tuple = new HashMap(); decode(document.content(), null, tuple); return tuple; } }) .toBlocking() .forEach(new Action1>() { @Override public void call(HashMap tuple) { data.add(tuple); } }); result.addAll(data); return Status.OK; } /** * Performs the {@link #scan(String, String, int, Set, Vector)} operation N1Ql only for a subset of the fields. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return The result of the operation. */ private Status scanSpecificFields(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { String scanSpecQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` WHERE meta().id >= '$1' LIMIT $2"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( scanSpecQuery, JsonArray.from(formatId(table, startkey), recordcount), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new RuntimeException("Error while parsing N1QL Result. 
Query: " + scanSpecQuery + ", Errors: " + queryResult.errors()); } boolean allFields = fields == null || fields.isEmpty(); result.ensureCapacity(recordcount); for (N1qlQueryRow row : queryResult) { JsonObject value = row.value(); if (fields == null) { value = value.getObject(bucketName); } Set f = allFields ? value.getNames() : fields; HashMap tuple = new HashMap(f.size()); for (String field : f) { tuple.put(field, new StringByteIterator(value.getString(field))); } result.add(tuple); } return Status.OK; } /** * Helper method to block on the response, depending on the property set. * * By default, since YCSB is sync the code will always wait for the operation to complete. In some * cases it can be useful to just "drive load" and disable the waiting. Note that when the * "-p couchbase.syncMutationResponse=false" option is used, the measured results by YCSB can basically * be thrown away. Still helpful sometimes during load phases to speed them up :) * * @param input the async input observable. */ private void waitForMutationResponse(final Observable> input) { if (!syncMutResponse) { input.subscribe(new Subscriber>() { @Override public void onCompleted() { } @Override public void onError(Throwable e) { } @Override public void onNext(Document document) { } }); } else { Blocking.blockForSingle(input, kvTimeout, TimeUnit.MILLISECONDS); } } /** * Helper method to turn the values into a String, used with {@link #upsertN1ql(String, HashMap)}. * * @param values the values to encode. * @return the encoded string. 
*/ private static String encodeN1qlFields(final HashMap values) { if (values.isEmpty()) { return ""; } StringBuilder sb = new StringBuilder(); for (Map.Entry entry : values.entrySet()) { String raw = entry.getValue().toString(); String escaped = raw.replace("\"", "\\\"").replace("\'", "\\\'"); sb.append(entry.getKey()).append("=\"").append(escaped).append("\" "); } String toReturn = sb.toString(); return toReturn.substring(0, toReturn.length() - 1); } /** * Helper method to turn the map of values into a {@link JsonObject} for further use. * * @param values the values to transform. * @return the created json object. */ private static JsonObject valuesToJsonObject(final HashMap values) { JsonObject result = JsonObject.create(); for (Map.Entry entry : values.entrySet()) { result.put(entry.getKey(), entry.getValue().toString()); } return result; } /** * Helper method to join the set of fields into a String suitable for N1QL. * * @param fields the fields to join. * @return the joined fields as a String. */ private static String joinFields(final Set fields) { if (fields == null || fields.isEmpty()) { return "*"; } StringBuilder builder = new StringBuilder(); for (String f : fields) { builder.append("`").append(f).append("`").append(","); } String toReturn = builder.toString(); return toReturn.substring(0, toReturn.length() - 1); } /** * Helper method to turn the prefix and key into a proper document ID. * * @param prefix the prefix (table). * @param key the key itself. * @return a document ID that can be used with Couchbase. */ private static String formatId(final String prefix, final String key) { return prefix + SEPARATOR + key; } /** * Helper method to parse the "ReplicateTo" property on startup. * * @param property the proeprty to parse. * @return the parsed setting. 
*/ private static ReplicateTo parseReplicateTo(final String property) throws DBException { int value = Integer.parseInt(property); switch (value) { case 0: return ReplicateTo.NONE; case 1: return ReplicateTo.ONE; case 2: return ReplicateTo.TWO; case 3: return ReplicateTo.THREE; default: throw new DBException("\"couchbase.replicateTo\" must be between 0 and 3"); } } /** * Helper method to parse the "PersistTo" property on startup. * * @param property the proeprty to parse. * @return the parsed setting. */ private static PersistTo parsePersistTo(final String property) throws DBException { int value = Integer.parseInt(property); switch (value) { case 0: return PersistTo.NONE; case 1: return PersistTo.ONE; case 2: return PersistTo.TWO; case 3: return PersistTo.THREE; case 4: return PersistTo.FOUR; default: throw new DBException("\"couchbase.persistTo\" must be between 0 and 4"); } } /** * Decode the String from server and pass it into the decoded destination. * * @param source the loaded object. * @param fields the fields to check. * @param dest the result passed back to YCSB. */ private void decode(final String source, final Set fields, final HashMap dest) { try { JsonNode json = JacksonTransformers.MAPPER.readTree(source); boolean checkFields = fields != null && !fields.isEmpty(); for (Iterator> jsonFields = json.fields(); jsonFields.hasNext();) { Map.Entry jsonField = jsonFields.next(); String name = jsonField.getKey(); - if (checkFields && fields.contains(name)) { + if (checkFields && !fields.contains(name)) { continue; } JsonNode jsonValue = jsonField.getValue(); if (jsonValue != null && !jsonValue.isNull()) { dest.put(name, new StringByteIterator(jsonValue.asText())); } } } catch (Exception e) { throw new RuntimeException("Could not decode JSON"); } } /** * Encode the source into a String for storage. * * @param source the source value. * @return the encoded string. 
*/ private String encode(final HashMap source) { HashMap stringMap = StringByteIterator.getStringMap(source); ObjectNode node = JacksonTransformers.MAPPER.createObjectNode(); for (Map.Entry pair : stringMap.entrySet()) { node.put(pair.getKey(), pair.getValue()); } JsonFactory jsonFactory = new JsonFactory(); Writer writer = new StringWriter(); try { JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer); JacksonTransformers.MAPPER.writeTree(jsonGenerator, node); } catch (Exception e) { throw new RuntimeException("Could not encode JSON value"); } return writer.toString(); } } /** * Factory for the {@link BackoffSelectStrategy} to be used with boosting. */ class BackoffSelectStrategyFactory implements SelectStrategyFactory { @Override public SelectStrategy newSelectStrategy() { return new BackoffSelectStrategy(); } } /** * Custom IO select strategy which trades CPU for throughput, used with the boost setting. */ class BackoffSelectStrategy implements SelectStrategy { private int counter = 0; @Override public int calculateStrategy(final IntSupplier supplier, final boolean hasTasks) throws Exception { int selectNowResult = supplier.get(); if (hasTasks || selectNowResult != 0) { counter = 0; return selectNowResult; } counter++; if (counter > 2000) { LockSupport.parkNanos(1); } else if (counter > 3000) { Thread.yield(); } else if (counter > 4000) { LockSupport.parkNanos(1000); } else if (counter > 5000) { // defer to blocking select counter = 0; return SelectStrategy.SELECT; } return SelectStrategy.CONTINUE; } } diff --git a/distribution/pom.xml b/distribution/pom.xml index 5f04b4a5..d8af2145 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -1,217 +1,222 @@ 4.0.0 com.yahoo.ycsb root - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ycsb YCSB Release Distribution Builder pom This module creates the release package of the YCSB with all DB library bindings. It is only used by the build process and does not contain any real code of itself. 
com.yahoo.ycsb core ${project.version} com.yahoo.ycsb accumulo-binding ${project.version} com.yahoo.ycsb aerospike-binding ${project.version} + + com.yahoo.ycsb + arangodb-binding + ${project.version} + com.yahoo.ycsb asynchbase-binding ${project.version} com.yahoo.ycsb cassandra-binding ${project.version} com.yahoo.ycsb couchbase-binding ${project.version} com.yahoo.ycsb couchbase2-binding ${project.version} com.yahoo.ycsb dynamodb-binding ${project.version} com.yahoo.ycsb elasticsearch-binding ${project.version} com.yahoo.ycsb geode-binding ${project.version} com.yahoo.ycsb googledatastore-binding ${project.version} com.yahoo.ycsb googlebigtable-binding ${project.version} com.yahoo.ycsb hbase094-binding ${project.version} com.yahoo.ycsb hbase098-binding ${project.version} com.yahoo.ycsb hbase10-binding ${project.version} com.yahoo.ycsb hypertable-binding ${project.version} com.yahoo.ycsb infinispan-binding ${project.version} com.yahoo.ycsb jdbc-binding ${project.version} com.yahoo.ycsb kudu-binding ${project.version} com.yahoo.ycsb memcached-binding ${project.version} com.yahoo.ycsb mongodb-binding ${project.version} com.yahoo.ycsb nosqldb-binding ${project.version} com.yahoo.ycsb orientdb-binding ${project.version} com.yahoo.ycsb rados-binding ${project.version} com.yahoo.ycsb redis-binding ${project.version} com.yahoo.ycsb riak-binding ${project.version} com.yahoo.ycsb s3-binding ${project.version} com.yahoo.ycsb solr-binding ${project.version} com.yahoo.ycsb tarantool-binding ${project.version} org.apache.maven.plugins maven-assembly-plugin ${maven.assembly.version} src/main/assembly/distribution.xml false posix package single diff --git a/distribution/src/main/assembly/distribution.xml b/distribution/src/main/assembly/distribution.xml index 618cee5c..487e1e2d 100644 --- a/distribution/src/main/assembly/distribution.xml +++ b/distribution/src/main/assembly/distribution.xml @@ -1,98 +1,98 @@ package tar.gz true .. . 
0644 README LICENSE.txt NOTICE.txt ../bin bin 0755 - ycsb + ycsb* ../workloads workloads 0644 lib com.yahoo.ycsb:core runtime false false true true true true com.yahoo.ycsb:core com.yahoo.ycsb:binding-parent com.yahoo.ycsb:datastore-specific-descriptor com.yahoo.ycsb:ycsb com.yahoo.ycsb:root README.md conf src/main/conf lib target/dependency false ${module.artifactId}/lib false diff --git a/dynamodb/pom.xml b/dynamodb/pom.xml index c293ce2b..6ed245ae 100644 --- a/dynamodb/pom.xml +++ b/dynamodb/pom.xml @@ -1,77 +1,77 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent dynamodb-binding DynamoDB DB Binding false com.amazonaws aws-java-sdk 1.10.48 log4j log4j 1.2.17 com.yahoo.ycsb core ${project.version} provided org.apache.maven.plugins maven-checkstyle-plugin 2.15 true ../checkstyle.xml true true validate validate checkstyle diff --git a/elasticsearch/README.md b/elasticsearch/README.md index 157ccec0..0e863c89 100644 --- a/elasticsearch/README.md +++ b/elasticsearch/README.md @@ -1,79 +1,94 @@ ## Quick Start This section describes how to run YCSB on Elasticsearch running locally. ### 1. Set Up YCSB Clone the YCSB git repository and compile: git clone git://github.com/brianfrankcooper/YCSB.git cd YCSB mvn clean package ### 2. Run YCSB Now you are ready to run! First, load the data: - ./bin/ycsb load elasticsearch -s -P workloads/workloada + ./bin/ycsb load elasticsearch -s -P workloads/workloada -p path.home= Then, run the workload: - ./bin/ycsb run elasticsearch -s -P workloads/workloada + ./bin/ycsb run elasticsearch -s -P workloads/workloada -p path.home= -For further configuration see below: +Note that the `` specified in each execution should be the same. + +The Elasticsearch binding has two modes of operation, embedded mode and remote +mode. In embedded mode, the client creates an embedded instance of +Elasticsearch that uses the specified `` to persist data between +executions. 
+ +In remote mode, the client will hit a standalone instance of Elasticsearch. To +use remote mode, add the flags `-p es.remote=true` and specify a hosts list via +`-p es.hosts.list=,...,`. + + ./bin/ycsb run elasticsearch -s -P workloads/workloada -p es.remote=true \ + -p es.hosts.list=,...,` + +Note that `es.hosts.list` defaults to `localhost:9300`. For further +configuration see below: ### Defaults Configuration The default setting for the Elasticsearch node that is created is as follows: cluster.name=es.ycsb.cluster es.index.key=es.ycsb es.number_of_shards=1 es.number_of_replicas=0 es.remote=false es.newdb=false - es.hosts.list=localhost:9200 (only applies if es.remote=true) + es.hosts.list=localhost:9300 (only applies if es.remote=true) ### Custom Configuration If you wish to customize the settings used to create the Elasticsearch node you can created a new property file that contains your desired Elasticsearch node settings and pass it in via the parameter to 'bin/ycsb' script. Note that the default properties will be kept if you don't explicitly overwrite them. Assuming that we have a properties file named "myproperties.data" that contains custom Elasticsearch node configuration you can execute the following to pass it into the Elasticsearch client: ./bin/ycsb run elasticsearch -P workloads/workloada -P myproperties.data -s If you wish to change the default index name you can set the following property: es.index.key=my_index_key If you wish to run against a remote cluster you can set the following property: es.remote=true By default this will use localhost:9300 as a seed node to discover the cluster. You can also specify es.hosts.list=(\w+:\d+)+ (a comma-separated list of host/port pairs) to change this. 
diff --git a/elasticsearch/pom.xml b/elasticsearch/pom.xml index fc028291..5e7c4057 100644 --- a/elasticsearch/pom.xml +++ b/elasticsearch/pom.xml @@ -1,60 +1,60 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent elasticsearch-binding Elasticsearch Binding jar - 2.3.2 + 2.4.0 net.java.dev.jna jna 4.1.0 com.yahoo.ycsb core ${project.version} provided org.elasticsearch elasticsearch ${elasticsearch-version} - org.testng - testng - 6.1.1 + junit + junit + 4.12 test diff --git a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java index 6a95d9ce..76ddee1d 100644 --- a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java +++ b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java @@ -1,360 +1,369 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import static org.elasticsearch.common.settings.Settings.Builder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.node.NodeBuilder.nodeBuilder; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.node.Node; import org.elasticsearch.search.SearchHit; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * Elasticsearch client for YCSB framework. * *

* Default properties to set: *

*
    *
  • cluster.name = es.ycsb.cluster *
  • es.index.key = es.ycsb *
  • es.number_of_shards = 1 *
  • es.number_of_replicas = 0 *
*/ public class ElasticsearchClient extends DB { private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster"; private static final String DEFAULT_INDEX_KEY = "es.ycsb"; private static final String DEFAULT_REMOTE_HOST = "localhost:9300"; private static final int NUMBER_OF_SHARDS = 1; private static final int NUMBER_OF_REPLICAS = 0; private Node node; private Client client; private String indexKey; private Boolean remoteMode; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { - Properties props = getProperties(); + final Properties props = getProperties(); + + // Check if transport client needs to be used (To connect to multiple + // elasticsearch nodes) + remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false")); + + final String pathHome = props.getProperty("path.home"); + + // when running in embedded mode, require path.home + if (!remoteMode && (pathHome == null || pathHome.isEmpty())) { + throw new IllegalArgumentException("path.home must be specified when running in embedded mode"); + } + this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY); int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS); int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS); - // Check if transport client needs to be used (To connect to multiple - // elasticsearch nodes) - remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false")); Boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false")); Builder settings = Settings.settingsBuilder() .put("cluster.name", DEFAULT_CLUSTER_NAME) .put("node.local", Boolean.toString(!remoteMode)) - .put("path.home", System.getProperty("java.io.tmpdir")); + .put("path.home", pathHome); // if properties file contains elasticsearch user defined properties // add it to the settings file (will 
overwrite the defaults). settings.put(props); final String clusterName = settings.get("cluster.name"); System.err.println("Elasticsearch starting node = " + clusterName); System.err.println("Elasticsearch node path.home = " + settings.get("path.home")); System.err.println("Elasticsearch Remote Mode = " + remoteMode); // Remote mode support for connecting to remote elasticsearch cluster if (remoteMode) { settings.put("client.transport.sniff", true) .put("client.transport.ignore_cluster_name", false) .put("client.transport.ping_timeout", "30s") .put("client.transport.nodes_sampler_interval", "30s"); // Default it to localhost:9300 String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(","); System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST)); TransportClient tClient = TransportClient.builder().settings(settings).build(); for (String h : nodeList) { String[] nodes = h.split(":"); try { tClient.addTransportAddress(new InetSocketTransportAddress( InetAddress.getByName(nodes[0]), Integer.parseInt(nodes[1]) )); } catch (NumberFormatException e) { throw new IllegalArgumentException("Unable to parse port number.", e); } catch (UnknownHostException e) { throw new IllegalArgumentException("Unable to Identify host.", e); } } client = tClient; } else { // Start node only if transport client mode is disabled node = nodeBuilder().clusterName(clusterName).settings(settings).node(); node.start(); client = node.client(); } final boolean exists = client.admin().indices() .exists(Requests.indicesExistsRequest(indexKey)).actionGet() .isExists(); if (exists && newdb) { client.admin().indices().prepareDelete(indexKey).execute().actionGet(); } if (!exists || newdb) { client.admin().indices().create( new CreateIndexRequest(indexKey) .settings( Settings.builder() .put("index.number_of_shards", numberOfShards) .put("index.number_of_replicas", numberOfReplicas) .put("index.mapping._id.indexed", true) 
)).actionGet(); } client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet(); } private int parseIntegerProperty(Properties properties, String key, int defaultValue) { String value = properties.getProperty(key); return value == null ? defaultValue : Integer.parseInt(value); } @Override public void cleanup() throws DBException { if (!remoteMode) { if (!node.isClosed()) { client.close(); node.close(); } } else { client.close(); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status insert(String table, String key, HashMap values) { try { final XContentBuilder doc = jsonBuilder().startObject(); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { doc.field(entry.getKey(), entry.getValue()); } doc.endObject(); client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet(); return Status.OK; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { DeleteResponse response = client.prepareDelete(indexKey, table, key).execute().actionGet(); if (response.isFound()) { return Status.OK; } else { return Status.NOT_FOUND; } } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Read a record from the database. 
Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set fields, HashMap result) { try { final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet(); if (response.isExists()) { if (fields != null) { for (String field : fields) { result.put(field, new StringByteIterator( (String) response.getSource().get(field))); } } else { for (String field : response.getSource().keySet()) { result.put(field, new StringByteIterator( (String) response.getSource().get(field))); } } return Status.OK; } else { return Status.NOT_FOUND; } } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. 
*/ @Override public Status update(String table, String key, HashMap values) { try { final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet(); if (response.isExists()) { for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { response.getSource().put(entry.getKey(), entry.getValue()); } client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet(); return Status.OK; } else { return Status.NOT_FOUND; } } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. 
*/ @Override public Status scan( String table, String startkey, int recordcount, Set fields, Vector> result) { try { final RangeQueryBuilder rangeQuery = rangeQuery("_id").gte(startkey); final SearchResponse response = client.prepareSearch(indexKey) .setTypes(table) .setQuery(rangeQuery) .setSize(recordcount) .execute() .actionGet(); HashMap entry; for (SearchHit hit : response.getHits()) { entry = new HashMap<>(fields.size()); for (String field : fields) { entry.put(field, new StringByteIterator((String) hit.getSource().get(field))); } result.add(entry); } return Status.OK; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } } diff --git a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java index 69e52ff6..d1ad64d1 100644 --- a/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java +++ b/elasticsearch/src/test/java/com/yahoo/ycsb/db/ElasticsearchClientTest.java @@ -1,149 +1,156 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /* * To change this template, choose Tools | Templates * and open the template in the editor. 
*/ package com.yahoo.ycsb.db; -import static org.testng.AssertJUnit.assertEquals; - import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; - -import org.testng.annotations.AfterClass; -import org.testng.annotations.AfterMethod; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; import java.util.HashMap; +import java.util.Properties; import java.util.Set; import java.util.Vector; +import static org.junit.Assert.assertEquals; + public class ElasticsearchClientTest { + @ClassRule public final static TemporaryFolder temp = new TemporaryFolder(); protected final static ElasticsearchClient instance = new ElasticsearchClient(); protected final static HashMap MOCK_DATA; protected final static String MOCK_TABLE = "MOCK_TABLE"; protected final static String MOCK_KEY0 = "0"; protected final static String MOCK_KEY1 = "1"; protected final static String MOCK_KEY2 = "2"; static { MOCK_DATA = new HashMap(10); for (int i = 1; i <= 10; i++) { MOCK_DATA.put("field" + i, new StringByteIterator("value" + i)); } } @BeforeClass public static void setUpClass() throws DBException { + final Properties props = new Properties(); + props.put("path.home", temp.getRoot().toString()); + instance.setProperties(props); instance.init(); } @AfterClass public static void tearDownClass() throws DBException { instance.cleanup(); } - @BeforeMethod + @Before public void setUp() { instance.insert(MOCK_TABLE, MOCK_KEY1, MOCK_DATA); instance.insert(MOCK_TABLE, MOCK_KEY2, MOCK_DATA); } - @AfterMethod + @After public void tearDown() { instance.delete(MOCK_TABLE, MOCK_KEY1); instance.delete(MOCK_TABLE, MOCK_KEY2); } /** * 
Test of insert method, of class ElasticsearchClient. */ @Test public void testInsert() { System.out.println("insert"); Status result = instance.insert(MOCK_TABLE, MOCK_KEY0, MOCK_DATA); assertEquals(Status.OK, result); } /** * Test of delete method, of class ElasticsearchClient. */ @Test public void testDelete() { System.out.println("delete"); Status result = instance.delete(MOCK_TABLE, MOCK_KEY1); assertEquals(Status.OK, result); } /** * Test of read method, of class ElasticsearchClient. */ @Test public void testRead() { System.out.println("read"); Set fields = MOCK_DATA.keySet(); HashMap resultParam = new HashMap(10); Status result = instance.read(MOCK_TABLE, MOCK_KEY1, fields, resultParam); assertEquals(Status.OK, result); } /** * Test of update method, of class ElasticsearchClient. */ @Test public void testUpdate() { System.out.println("update"); int i; HashMap newValues = new HashMap(10); for (i = 1; i <= 10; i++) { newValues.put("field" + i, new StringByteIterator("newvalue" + i)); } Status result = instance.update(MOCK_TABLE, MOCK_KEY1, newValues); assertEquals(Status.OK, result); //validate that the values changed HashMap resultParam = new HashMap(10); instance.read(MOCK_TABLE, MOCK_KEY1, MOCK_DATA.keySet(), resultParam); for (i = 1; i <= 10; i++) { assertEquals("newvalue" + i, resultParam.get("field" + i).toString()); } } /** * Test of scan method, of class ElasticsearchClient. */ @Test public void testScan() { System.out.println("scan"); int recordcount = 10; Set fields = MOCK_DATA.keySet(); Vector> resultParam = new Vector>(10); Status result = instance.scan(MOCK_TABLE, MOCK_KEY1, recordcount, fields, resultParam); assertEquals(Status.OK, result); } } diff --git a/geode/README.md b/geode/README.md index 9d4fe3a3..59690bfe 100644 --- a/geode/README.md +++ b/geode/README.md @@ -1,67 +1,68 @@ ## Quick Start This section describes how to run YCSB on Apache Geode (incubating). 
### Get Apache Geode You can download Geode from http://geode.incubator.apache.org/releases/ #### Start Geode Cluster Use the Geode shell (gfsh) to start the cluster. You will need to start at-least one locator which is a member discovery service and one or more Geode servers. Launch gfsh: ``` $ cd $GEODE_HOME $ ./bin/gfsh ``` Start a locator and two servers: ``` gfsh> start locator --name=locator1 gfsh> start server --name=server1 --server-port=40404 gfsh> start server --name=server2 --server-port=40405 +gfsh> configure pdx --read-serialized=true ``` Create the "usertable" region required by YCSB driver: ``` gfsh>create region --name=usertable --type=PARTITION ``` gfsh has tab autocompletion, so you can play around with various options. ### Start YCSB workload From your YCSB directory, you can run the ycsb workload as follows ``` ./bin/ycsb load geode -P workloads/workloada -p geode.locator=host[port] ``` (default port of locator is 10334). In the default mode, ycsb geode driver will connect as a client to the geode cluster. 
To make the ycsb driver a peer member of the distributed system use the property `-p geode.topology=p2p -p geode.locator=host[port]` Note: For update workloads, please use the property `-p writeallfields=true` diff --git a/geode/pom.xml b/geode/pom.xml index bf8a65a8..4e20d9e2 100644 --- a/geode/pom.xml +++ b/geode/pom.xml @@ -1,74 +1,74 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent geode-binding Geode DB Binding jar false org.apache.geode geode-core ${geode.version} com.yahoo.ycsb core ${project.version} provided org.apache.maven.plugins maven-checkstyle-plugin 2.15 true ../checkstyle.xml true true validate validate checkstyle diff --git a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java index f6bcc01a..603b7b73 100644 --- a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java +++ b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java @@ -1,191 +1,210 @@ /** * Copyright (c) 2013 - 2016 YCSB Contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.gemstone.gemfire.cache.*; import com.gemstone.gemfire.cache.client.ClientCache; import com.gemstone.gemfire.cache.client.ClientCacheFactory; import com.gemstone.gemfire.cache.client.ClientRegionFactory; import com.gemstone.gemfire.cache.client.ClientRegionShortcut; import com.gemstone.gemfire.internal.admin.remote.DistributionLocatorId; +import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; +import com.gemstone.gemfire.pdx.JSONFormatter; +import com.gemstone.gemfire.pdx.PdxInstance; +import com.gemstone.gemfire.pdx.PdxInstanceFactory; import com.yahoo.ycsb.*; import java.util.*; /** * Apache Geode (incubating) client for the YCSB benchmark.
*

By default acts as a Geode client and tries to connect * to Geode cache server running on localhost with default * cache server port. Hostname and port of a Geode cacheServer * can be provided using geode.serverport=port and * geode.serverhost=host properties on YCSB command line. * A locator may also be used for discovering a cacheServer * by using the property geode.locator=host[port]

- * + *

*

To run this client in a peer-to-peer topology with other Geode * nodes, use the property geode.topology=p2p. Running * in p2p mode will enable embedded caching in this client.

- * + *

*

YCSB by default does its operations against "usertable". When running * as a client this is a ClientRegionShortcut.PROXY region, * when running in p2p mode it is a RegionShortcut.PARTITION * region. A cache.xml defining "usertable" region can be placed in the * working directory to override these region definitions.

- * */ public class GeodeClient extends DB { - /** property name of the port where Geode server is listening for connections. */ + /** + * property name of the port where Geode server is listening for connections. + */ private static final String SERVERPORT_PROPERTY_NAME = "geode.serverport"; - /** property name of the host where Geode server is running. */ + /** + * property name of the host where Geode server is running. + */ private static final String SERVERHOST_PROPERTY_NAME = "geode.serverhost"; - /** default value of {@link #SERVERHOST_PROPERTY_NAME}. */ + /** + * default value of {@link #SERVERHOST_PROPERTY_NAME}. + */ private static final String SERVERHOST_PROPERTY_DEFAULT = "localhost"; - /** property name to specify a Geode locator. This property can be used in both - * client server and p2p topology */ + /** + * property name to specify a Geode locator. This property can be used in both + * client server and p2p topology + */ private static final String LOCATOR_PROPERTY_NAME = "geode.locator"; - /** property name to specify Geode topology. */ + /** + * property name to specify Geode topology. + */ private static final String TOPOLOGY_PROPERTY_NAME = "geode.topology"; - /** value of {@value #TOPOLOGY_PROPERTY_NAME} when peer to peer topology should be used. - * (client-server topology is default) */ + /** + * value of {@value #TOPOLOGY_PROPERTY_NAME} when peer to peer topology should be used. + * (client-server topology is default) + */ private static final String TOPOLOGY_P2P_VALUE = "p2p"; private GemFireCache cache; - /** true if ycsb client runs as a client to a Geode cache server. */ + /** + * true if ycsb client runs as a client to a Geode cache server. 
+ */ private boolean isClient; @Override public void init() throws DBException { Properties props = getProperties(); // hostName where Geode cacheServer is running String serverHost = null; // port of Geode cacheServer int serverPort = 0; String locatorStr = null; if (props != null && !props.isEmpty()) { String serverPortStr = props.getProperty(SERVERPORT_PROPERTY_NAME); if (serverPortStr != null) { serverPort = Integer.parseInt(serverPortStr); } serverHost = props.getProperty(SERVERHOST_PROPERTY_NAME, SERVERHOST_PROPERTY_DEFAULT); locatorStr = props.getProperty(LOCATOR_PROPERTY_NAME); String topology = props.getProperty(TOPOLOGY_PROPERTY_NAME); if (topology != null && topology.equals(TOPOLOGY_P2P_VALUE)) { CacheFactory cf = new CacheFactory(); if (locatorStr != null) { cf.set("locators", locatorStr); } cache = cf.create(); isClient = false; return; } } isClient = true; DistributionLocatorId locator = null; if (locatorStr != null) { locator = new DistributionLocatorId(locatorStr); } ClientCacheFactory ccf = new ClientCacheFactory(); if (serverPort != 0) { ccf.addPoolServer(serverHost, serverPort); } else if (locator != null) { ccf.addPoolLocator(locator.getHost().getCanonicalHostName(), locator.getPort()); } cache = ccf.create(); } @Override public Status read(String table, String key, Set fields, HashMap result) { - Region> r = getRegion(table); - Map val = r.get(key); + Region r = getRegion(table); + PdxInstance val = r.get(key); if (val != null) { if (fields == null) { - for (Map.Entry entry : val.entrySet()) { - result.put(entry.getKey(), new ByteArrayByteIterator(entry.getValue())); + for (String fieldName : val.getFieldNames()) { + result.put(fieldName, new ByteArrayByteIterator((byte[]) val.getField(fieldName))); } } else { for (String field : fields) { - result.put(field, new ByteArrayByteIterator(val.get(field))); + result.put(field, new ByteArrayByteIterator((byte[]) val.getField(field))); } } return Status.OK; } return Status.ERROR; } @Override public 
Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // Geode does not support scan return Status.ERROR; } @Override public Status update(String table, String key, HashMap values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override public Status insert(String table, String key, HashMap values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override public Status delete(String table, String key) { getRegion(table).destroy(key); return Status.OK; } - private Map convertToBytearrayMap(Map values) { - Map retVal = new HashMap(); + private PdxInstance convertToBytearrayMap(Map values) { + GemFireCacheImpl gci = (GemFireCacheImpl) CacheFactory.getAnyInstance(); + PdxInstanceFactory pdxInstanceFactory = gci.createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME); + for (Map.Entry entry : values.entrySet()) { - retVal.put(entry.getKey(), entry.getValue().toArray()); + pdxInstanceFactory.writeByteArray(entry.getKey(), entry.getValue().toArray()); } - return retVal; + return pdxInstanceFactory.create(); } - private Region> getRegion(String table) { - Region> r = cache.getRegion(table); + private Region getRegion(String table) { + Region r = cache.getRegion(table); if (r == null) { try { if (isClient) { - ClientRegionFactory> crf = + ClientRegionFactory crf = ((ClientCache) cache).createClientRegionFactory(ClientRegionShortcut.PROXY); r = crf.create(table); } else { - RegionFactory> rf = ((Cache) cache).createRegionFactory(RegionShortcut.PARTITION); + RegionFactory rf = ((Cache) cache).createRegionFactory(RegionShortcut.PARTITION); r = rf.create(table); } } catch (RegionExistsException e) { // another thread created the region r = cache.getRegion(table); } } return r; } } \ No newline at end of file diff --git a/googlebigtable/pom.xml b/googlebigtable/pom.xml index d6ade78b..a1c0e143 100644 --- a/googlebigtable/pom.xml +++ b/googlebigtable/pom.xml @@ -1,47 +1,47 @@ 
4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ googlebigtable-binding Google Cloud Bigtable Binding jar com.google.cloud.bigtable bigtable-hbase-1.0 ${googlebigtable.version} com.yahoo.ycsb core ${project.version} provided \ No newline at end of file diff --git a/googledatastore/pom.xml b/googledatastore/pom.xml index 3da08353..554193a6 100644 --- a/googledatastore/pom.xml +++ b/googledatastore/pom.xml @@ -1,50 +1,50 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent googledatastore-binding Google Cloud Datastore Binding https://github.com/GoogleCloudPlatform/google-cloud-datastore com.google.cloud.datastore - datastore-v1beta3-proto-client - 1.0.0-beta.1 + datastore-v1-proto-client + 1.1.0 log4j log4j 1.2.17 com.yahoo.ycsb core ${project.version} provided diff --git a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java index a3f65534..7eb35b1e 100644 --- a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java +++ b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java @@ -1,335 +1,335 @@ /* * Copyright 2015 YCSB contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import com.google.api.client.auth.oauth2.Credential; -import com.google.datastore.v1beta3.*; -import com.google.datastore.v1beta3.CommitRequest.Mode; -import com.google.datastore.v1beta3.ReadOptions.ReadConsistency; -import com.google.datastore.v1beta3.client.Datastore; -import com.google.datastore.v1beta3.client.DatastoreException; -import com.google.datastore.v1beta3.client.DatastoreFactory; -import com.google.datastore.v1beta3.client.DatastoreHelper; -import com.google.datastore.v1beta3.client.DatastoreOptions; +import com.google.datastore.v1.*; +import com.google.datastore.v1.CommitRequest.Mode; +import com.google.datastore.v1.ReadOptions.ReadConsistency; +import com.google.datastore.v1.client.Datastore; +import com.google.datastore.v1.client.DatastoreException; +import com.google.datastore.v1.client.DatastoreFactory; +import com.google.datastore.v1.client.DatastoreHelper; +import com.google.datastore.v1.client.DatastoreOptions; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.apache.log4j.Level; import org.apache.log4j.Logger; import java.io.IOException; import java.security.GeneralSecurityException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; import javax.annotation.Nullable; /** * Google Cloud Datastore Client for YCSB. */ public class GoogleDatastoreClient extends DB { /** * Defines a MutationType used in this class. */ private enum MutationType { UPSERT, UPDATE, DELETE } /** * Defines a EntityGroupingMode enum used in this class. */ private enum EntityGroupingMode { ONE_ENTITY_PER_GROUP, MULTI_ENTITY_PER_GROUP } private static Logger logger = Logger.getLogger(GoogleDatastoreClient.class); // Read consistency defaults to "STRONG" per YCSB guidance. // User can override this via configure. 
private ReadConsistency readConsistency = ReadConsistency.STRONG; private EntityGroupingMode entityGroupingMode = EntityGroupingMode.ONE_ENTITY_PER_GROUP; private String rootEntityName; private Datastore datastore = null; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. */ @Override public void init() throws DBException { String debug = getProperties().getProperty("googledatastore.debug", null); if (null != debug && "true".equalsIgnoreCase(debug)) { logger.setLevel(Level.DEBUG); } // We need the following 3 essential properties to initialize datastore: // // - DatasetId, // - Path to private key file, // - Service account email address. String datasetId = getProperties().getProperty( "googledatastore.datasetId", null); if (datasetId == null) { throw new DBException( "Required property \"datasetId\" missing."); } String privateKeyFile = getProperties().getProperty( "googledatastore.privateKeyFile", null); if (privateKeyFile == null) { throw new DBException( "Required property \"privateKeyFile\" missing."); } String serviceAccountEmail = getProperties().getProperty( "googledatastore.serviceAccountEmail", null); if (serviceAccountEmail == null) { throw new DBException( "Required property \"serviceAccountEmail\" missing."); } // Below are properties related to benchmarking. String readConsistencyConfig = getProperties().getProperty( "googledatastore.readConsistency", null); if (readConsistencyConfig != null) { try { this.readConsistency = ReadConsistency.valueOf( readConsistencyConfig.trim().toUpperCase()); } catch (IllegalArgumentException e) { throw new DBException("Invalid read consistency specified: " + readConsistencyConfig + ". Expecting STRONG or EVENTUAL."); } } // // Entity Grouping Mode (googledatastore.entitygroupingmode), see // documentation in conf/googledatastore.properties. 
// String entityGroupingConfig = getProperties().getProperty( "googledatastore.entityGroupingMode", null); if (entityGroupingConfig != null) { try { this.entityGroupingMode = EntityGroupingMode.valueOf( entityGroupingConfig.trim().toUpperCase()); } catch (IllegalArgumentException e) { throw new DBException("Invalid entity grouping mode specified: " + entityGroupingConfig + ". Expecting ONE_ENTITY_PER_GROUP or " + "MULTI_ENTITY_PER_GROUP."); } } this.rootEntityName = getProperties().getProperty( "googledatastore.rootEntityName", "YCSB_ROOT_ENTITY"); try { // Setup the connection to Google Cloud Datastore with the credentials // obtained from the configure. DatastoreOptions.Builder options = new DatastoreOptions.Builder(); Credential credential = DatastoreHelper.getServiceAccountCredential( serviceAccountEmail, privateKeyFile); logger.info("Using JWT Service Account credential."); logger.info("DatasetID: " + datasetId + ", Service Account Email: " + serviceAccountEmail + ", Private Key File Path: " + privateKeyFile); datastore = DatastoreFactory.get().create( options.credential(credential).projectId(datasetId).build()); } catch (GeneralSecurityException exception) { throw new DBException("Security error connecting to the datastore: " + exception.getMessage(), exception); } catch (IOException exception) { throw new DBException("I/O error connecting to the datastore: " + exception.getMessage(), exception); } logger.info("Datastore client instance created: " + datastore.toString()); } @Override public Status read(String table, String key, Set fields, HashMap result) { LookupRequest.Builder lookupRequest = LookupRequest.newBuilder(); lookupRequest.addKeys(buildPrimaryKey(table, key)); lookupRequest.getReadOptionsBuilder().setReadConsistency( this.readConsistency); // Note above, datastore lookupRequest always reads the entire entity, it // does not support reading a subset of "fields" (properties) of an entity. 
logger.debug("Built lookup request as: " + lookupRequest.toString()); LookupResponse response = null; try { response = datastore.lookup(lookupRequest.build()); } catch (DatastoreException exception) { logger.error( String.format("Datastore Exception when reading (%s): %s %s", exception.getMessage(), exception.getMethodName(), exception.getCode())); // DatastoreException.getCode() returns an HTTP response code which we // will bubble up to the user as part of the YCSB Status "name". return new Status("ERROR-" + exception.getCode(), exception.getMessage()); } if (response.getFoundCount() == 0) { return new Status("ERROR-404", "Not Found, key is: " + key); } else if (response.getFoundCount() > 1) { // We only asked to lookup for one key, shouldn't have got more than one // entity back. Unexpected State. return Status.UNEXPECTED_STATE; } Entity entity = response.getFound(0).getEntity(); logger.debug("Read entity: " + entity.toString()); Map properties = entity.getProperties(); Set propertiesToReturn = (fields == null ? properties.keySet() : fields); for (String name : propertiesToReturn) { if (properties.containsKey(name)) { result.put(name, new StringByteIterator(properties.get(name) .getStringValue())); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // TODO: Implement Scan as query on primary key. return Status.NOT_IMPLEMENTED; } @Override public Status update(String table, String key, HashMap values) { return doSingleItemMutation(table, key, values, MutationType.UPDATE); } @Override public Status insert(String table, String key, HashMap values) { // Use Upsert to allow overwrite of existing key instead of failing the // load (or run) just because the DB already has the key. // This is the same behavior as what other DB does here (such as // the DynamoDB client). 
return doSingleItemMutation(table, key, values, MutationType.UPSERT); } @Override public Status delete(String table, String key) { return doSingleItemMutation(table, key, null, MutationType.DELETE); } private Key.Builder buildPrimaryKey(String table, String key) { Key.Builder result = Key.newBuilder(); if (this.entityGroupingMode == EntityGroupingMode.MULTI_ENTITY_PER_GROUP) { // All entities are in side the same group when we are in this mode. result.addPath(Key.PathElement.newBuilder().setKind(table). setName(rootEntityName)); } return result.addPath(Key.PathElement.newBuilder().setKind(table) .setName(key)); } private Status doSingleItemMutation(String table, String key, @Nullable HashMap values, MutationType mutationType) { // First build the key. Key.Builder datastoreKey = buildPrimaryKey(table, key); // Build a commit request in non-transactional mode. // Single item mutation to google datastore // is always atomic and strongly consistent. Transaction is only necessary // for multi-item mutation, or Read-modify-write operation. CommitRequest.Builder commitRequest = CommitRequest.newBuilder(); commitRequest.setMode(Mode.NON_TRANSACTIONAL); if (mutationType == MutationType.DELETE) { commitRequest.addMutationsBuilder().setDelete(datastoreKey); } else { // If this is not for delete, build the entity. 
Entity.Builder entityBuilder = Entity.newBuilder(); entityBuilder.setKey(datastoreKey); for (Entry val : values.entrySet()) { entityBuilder.getMutableProperties() .put(val.getKey(), Value.newBuilder() .setStringValue(val.getValue().toString()).build()); } Entity entity = entityBuilder.build(); logger.debug("entity built as: " + entity.toString()); if (mutationType == MutationType.UPSERT) { commitRequest.addMutationsBuilder().setUpsert(entity); } else if (mutationType == MutationType.UPDATE){ commitRequest.addMutationsBuilder().setUpdate(entity); } else { throw new RuntimeException("Impossible MutationType, code bug."); } } try { datastore.commit(commitRequest.build()); logger.debug("successfully committed."); } catch (DatastoreException exception) { // Catch all Datastore rpc errors. // Log the exception, the name of the method called and the error code. logger.error( String.format("Datastore Exception when committing (%s): %s %s", exception.getMessage(), exception.getMethodName(), exception.getCode())); // DatastoreException.getCode() returns an HTTP response code which we // will bubble up to the user as part of the YCSB Status "name". 
return new Status("ERROR-" + exception.getCode(), exception.getMessage()); } return Status.OK; } } diff --git a/hbase094/pom.xml b/hbase094/pom.xml index e2531633..8ff88ac7 100644 --- a/hbase094/pom.xml +++ b/hbase094/pom.xml @@ -1,68 +1,68 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ hbase094-binding HBase 0.94.x DB Binding org.apache.hbase hbase ${hbase094.version} org.apache.hadoop hadoop-core 1.0.4 com.yahoo.ycsb hbase098-binding ${project.version} * * org.slf4j slf4j-simple 1.7.12 com.yahoo.ycsb core ${project.version} provided diff --git a/hbase098/pom.xml b/hbase098/pom.xml index bbb0aa6c..646e5f8d 100644 --- a/hbase098/pom.xml +++ b/hbase098/pom.xml @@ -1,48 +1,48 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ hbase098-binding HBase 0.98.x DB Binding false org.apache.hbase hbase-client ${hbase098.version} com.yahoo.ycsb core ${project.version} provided diff --git a/hbase10/pom.xml b/hbase10/pom.xml index d136c0ca..01db0ceb 100644 --- a/hbase10/pom.xml +++ b/hbase10/pom.xml @@ -1,56 +1,56 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ hbase10-binding HBase 1.0 DB Binding org.apache.hbase hbase-client ${hbase10.version} com.yahoo.ycsb core ${project.version} provided junit junit 4.12 test org.apache.hbase hbase-testing-util ${hbase10.version} test diff --git a/hypertable/pom.xml b/hypertable/pom.xml index 6b4e6e26..637effe0 100644 --- a/hypertable/pom.xml +++ b/hypertable/pom.xml @@ -1,85 +1,85 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent hypertable-binding Hypertable DB Binding jar false com.yahoo.ycsb core ${project.version} provided org.apache.thrift libthrift ${thrift.version} org.hypertable hypertable ${hypertable.version} org.apache.maven.plugins maven-checkstyle-plugin 2.15 true ../checkstyle.xml true true validate validate checkstyle clojars.org 
http://clojars.org/repo diff --git a/infinispan/pom.xml b/infinispan/pom.xml index eaf5dc5a..8e0177db 100644 --- a/infinispan/pom.xml +++ b/infinispan/pom.xml @@ -1,78 +1,78 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent infinispan-binding Infinispan DB Binding jar false org.infinispan infinispan-client-hotrod ${infinispan.version} org.infinispan infinispan-core ${infinispan.version} com.yahoo.ycsb core ${project.version} provided org.apache.maven.plugins maven-checkstyle-plugin 2.15 true ../checkstyle.xml true true validate validate checkstyle diff --git a/jdbc/pom.xml b/jdbc/pom.xml index 4a8546d7..fad308f1 100644 --- a/jdbc/pom.xml +++ b/jdbc/pom.xml @@ -1,57 +1,57 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent jdbc-binding JDBC DB Binding jar org.apache.openjpa openjpa-jdbc ${openjpa.jdbc.version} com.yahoo.ycsb core ${project.version} provided junit junit 4.12 test org.hsqldb hsqldb 2.3.3 test diff --git a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBCreateTable.java b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBCreateTable.java index fbeba3c8..8dfb26dd 100644 --- a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBCreateTable.java +++ b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBCreateTable.java @@ -1,224 +1,224 @@ /** * Copyright (c) 2010 - 2016 Yahoo! Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import java.io.FileInputStream; import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.sql.Statement; import java.util.Enumeration; import java.util.Properties; /** * Utility class to create the table to be used by the benchmark. * * @author sudipto */ public final class JdbcDBCreateTable { private static void usageMessage() { System.out.println("Create Table Client. Options:"); System.out.println(" -p key=value properties defined."); System.out.println(" -P location of the properties file to load."); System.out.println(" -n name of the table."); System.out.println(" -f number of fields (default 10)."); } private static void createTable(Properties props, String tablename) throws SQLException { String driver = props.getProperty(JdbcDBClient.DRIVER_CLASS); String username = props.getProperty(JdbcDBClient.CONNECTION_USER); String password = props.getProperty(JdbcDBClient.CONNECTION_PASSWD, ""); String url = props.getProperty(JdbcDBClient.CONNECTION_URL); int fieldcount = Integer.parseInt(props.getProperty(JdbcDBClient.FIELD_COUNT_PROPERTY, JdbcDBClient.FIELD_COUNT_PROPERTY_DEFAULT)); if (driver == null || username == null || url == null) { throw new SQLException("Missing connection information."); } Connection conn = null; try { Class.forName(driver); conn = DriverManager.getConnection(url, username, password); Statement stmt = conn.createStatement(); StringBuilder sql = new StringBuilder("DROP TABLE IF EXISTS "); sql.append(tablename); sql.append(";"); stmt.execute(sql.toString()); sql = new StringBuilder("CREATE TABLE "); sql.append(tablename); - sql.append(" (KEY VARCHAR PRIMARY KEY"); + sql.append(" (YCSB_KEY VARCHAR PRIMARY KEY"); for (int idx = 0; idx < fieldcount; idx++) { sql.append(", FIELD"); sql.append(idx); sql.append(" VARCHAR"); } sql.append(");"); stmt.execute(sql.toString()); System.out.println("Table " + tablename + " created.."); } catch 
(ClassNotFoundException e) { throw new SQLException("JDBC Driver class not found."); } finally { if (conn != null) { System.out.println("Closing database connection."); conn.close(); } } } /** * @param args */ public static void main(String[] args) { if (args.length == 0) { usageMessage(); System.exit(0); } String tablename = null; int fieldcount = -1; Properties props = new Properties(); Properties fileprops = new Properties(); // parse arguments int argindex = 0; while (args[argindex].startsWith("-")) { if (args[argindex].compareTo("-P") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } String propfile = args[argindex]; argindex++; Properties myfileprops = new Properties(); try { myfileprops.load(new FileInputStream(propfile)); } catch (IOException e) { System.out.println(e.getMessage()); System.exit(0); } // Issue #5 - remove call to stringPropertyNames to make compilable // under Java 1.5 for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, myfileprops.getProperty(prop)); } } else if (args[argindex].compareTo("-p") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } int eq = args[argindex].indexOf('='); if (eq < 0) { usageMessage(); System.exit(0); } String name = args[argindex].substring(0, eq); String value = args[argindex].substring(eq + 1); props.put(name, value); argindex++; } else if (args[argindex].compareTo("-n") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } tablename = args[argindex++]; } else if (args[argindex].compareTo("-f") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } try { fieldcount = Integer.parseInt(args[argindex++]); } catch (NumberFormatException e) { System.err.println("Invalid number for field count"); usageMessage(); System.exit(1); } } else { System.out.println("Unknown option " + args[argindex]); 
usageMessage(); System.exit(0); } if (argindex >= args.length) { break; } } if (argindex != args.length) { usageMessage(); System.exit(0); } // overwrite file properties with properties from the command line // Issue #5 - remove call to stringPropertyNames to make compilable under // Java 1.5 for (Enumeration e = props.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, props.getProperty(prop)); } props = fileprops; if (tablename == null) { System.err.println("table name missing."); usageMessage(); System.exit(1); } if (fieldcount > 0) { props.setProperty(JdbcDBClient.FIELD_COUNT_PROPERTY, String.valueOf(fieldcount)); } try { createTable(props, tablename); } catch (SQLException e) { System.err.println("Error in creating table. " + e); System.exit(1); } } /** * Hidden constructor. */ private JdbcDBCreateTable() { super(); } } diff --git a/kudu/pom.xml b/kudu/pom.xml index b77c0104..9d3746e7 100644 --- a/kudu/pom.xml +++ b/kudu/pom.xml @@ -1,58 +1,58 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent kudu-binding Kudu DB Binding jar org.kududb kudu-client ${kudu.version} com.yahoo.ycsb core ${project.version} provided true false cloudera-repo Cloudera Releases https://repository.cloudera.com/artifactory/cloudera-repos diff --git a/mapkeeper/pom.xml b/mapkeeper/pom.xml index 07540bd3..202c8108 100644 --- a/mapkeeper/pom.xml +++ b/mapkeeper/pom.xml @@ -1,52 +1,52 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent mapkeeper-binding Mapkeeper DB Binding jar com.yahoo.mapkeeper mapkeeper ${mapkeeper.version} com.yahoo.ycsb core ${project.version} provided mapkeeper-releases https://raw.github.com/m1ch1/m1ch1-mvn-repo/master/releases diff --git a/memcached/pom.xml b/memcached/pom.xml index 88ae064c..1a3b0b9f 100644 --- a/memcached/pom.xml +++ b/memcached/pom.xml @@ -1,78 +1,78 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 
0.12.0-SNAPSHOT ../binding-parent memcached-binding memcached binding jar log4j log4j 1.2.17 com.yahoo.ycsb core ${project.version} org.codehaus.jackson jackson-mapper-asl 1.9.13 net.spy spymemcached 2.11.4 org.apache.maven.plugins maven-assembly-plugin ${maven.assembly.version} jar-with-dependencies false package single diff --git a/mongodb/pom.xml b/mongodb/pom.xml index b8e85dc7..c4ef745d 100644 --- a/mongodb/pom.xml +++ b/mongodb/pom.xml @@ -1,82 +1,82 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent mongodb-binding MongoDB Binding jar org.mongodb mongo-java-driver ${mongodb.version} com.allanbank mongodb-async-driver ${mongodb.async.version} com.yahoo.ycsb core ${project.version} provided ch.qos.logback logback-classic 1.1.2 runtime junit junit 4.12 test true always warn false never fail allanbank Allanbank Releases http://www.allanbank.com/repo/ default diff --git a/mongodb/src/main/resources/logback.xml b/mongodb/src/main/resources/logback.xml index dfbfeb2b..73354e06 100644 --- a/mongodb/src/main/resources/logback.xml +++ b/mongodb/src/main/resources/logback.xml @@ -1,32 +1,32 @@ + - %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n diff --git a/nosqldb/pom.xml b/nosqldb/pom.xml index 505a0faa..75603bf1 100644 --- a/nosqldb/pom.xml +++ b/nosqldb/pom.xml @@ -1,45 +1,45 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent nosqldb-binding Oracle NoSQL Database Binding jar com.oracle.kv oracle-nosql-client 3.0.5 com.yahoo.ycsb core ${project.version} provided diff --git a/orientdb/pom.xml b/orientdb/pom.xml index 54ae491b..d7a91aa7 100644 --- a/orientdb/pom.xml +++ b/orientdb/pom.xml @@ -1,62 +1,62 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent orientdb-binding OrientDB Binding jar sonatype-nexus-snapshots Sonatype Nexus Snapshots https://oss.sonatype.org/content/repositories/snapshots com.yahoo.ycsb core ${project.version} 
provided com.orientechnologies orientdb-client ${orientdb.version} junit junit 4.12 test org.slf4j slf4j-log4j12 1.7.10 diff --git a/pom.xml b/pom.xml index 8109f5f0..5357142b 100644 --- a/pom.xml +++ b/pom.xml @@ -1,179 +1,178 @@ 4.0.0 com.yahoo.ycsb root - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT pom YCSB Root This is the top level project that builds, packages the core and all the DB bindings for YCSB infrastructure. scm:git:git://github.com/brianfrankcooper/YCSB.git master https://github.com/brianfrankcooper/YCSB checkstyle checkstyle 5.0 org.jdom jdom 1.1 com.google.collections google-collections 1.0 org.slf4j slf4j-api 1.6.4 2.5.5 2.10 1.7.1 0.94.27 0.98.14-hadoop2 1.0.2 1.6.0 - 1.2.9 - 1.0.3 - 3.0.0 - 1.0.0-incubating.M2 + 3.0.0 + 1.0.0-incubating.M3 0.2.3 7.2.2.Final 0.9.0 2.1.1 3.0.3 2.0.1 2.1.8 2.0.0 1.10.20 0.81 UTF-8 0.8.0 0.9.5.6 1.4.10 - 2.2.6 + 2.3.1 1.6.5 2.0.5 3.1.2 5.4.0 + 2.7.3 core binding-parent accumulo aerospike + arangodb asynchbase cassandra - cassandra2 couchbase couchbase2 distribution dynamodb elasticsearch geode googlebigtable googledatastore hbase094 hbase098 hbase10 hypertable infinispan jdbc kudu memcached mongodb nosqldb orientdb rados redis riak s3 solr tarantool org.apache.maven.plugins maven-checkstyle-plugin 2.15 org.apache.maven.plugins maven-compiler-plugin 3.3 1.7 1.7 org.apache.maven.plugins maven-checkstyle-plugin validate validate check checkstyle.xml diff --git a/rados/pom.xml b/rados/pom.xml index 85d15af7..3676f270 100644 --- a/rados/pom.xml +++ b/rados/pom.xml @@ -1,67 +1,67 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent rados-binding rados of Ceph FS binding jar com.ceph rados ${rados.version} com.yahoo.ycsb core ${project.version} provided org.json json ${json.version} net.java.dev.jna jna 4.2.2 junit junit 4.12 test 0.2.0 20160212 diff --git a/redis/README.md b/redis/README.md index bc883271..27fe0ca8 100644 --- a/redis/README.md +++ b/redis/README.md @@ -1,56 +1,56 @@ ## Quick 
Start -This section describes how to run YCSB on Redis. +This section describes how to run YCSB on Redis.
a/solr/pom.xml b/solr/pom.xml index 0fc534dd..4ce44ed1 100644 --- a/solr/pom.xml +++ b/solr/pom.xml @@ -1,59 +1,59 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent solr-binding Solr Binding jar com.yahoo.ycsb core ${project.version} provided org.apache.solr solr-solrj ${solr.version} org.slf4j slf4j-log4j12 1.7.10 org.apache.solr solr-test-framework ${solr.version} test diff --git a/tarantool/pom.xml b/tarantool/pom.xml index 7d4a3c45..78f81ee6 100644 --- a/tarantool/pom.xml +++ b/tarantool/pom.xml @@ -1,75 +1,75 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent/ tarantool-binding Tarantool DB Binding jar false org.tarantool connector ${tarantool.version} com.yahoo.ycsb core ${project.version} provided org.apache.maven.plugins maven-checkstyle-plugin 2.15 true ../checkstyle.xml true true validate validate checkstyle diff --git a/voldemort/pom.xml b/voldemort/pom.xml index c2bc2ccf..1ac66995 100644 --- a/voldemort/pom.xml +++ b/voldemort/pom.xml @@ -1,51 +1,51 @@ 4.0.0 com.yahoo.ycsb binding-parent - 0.11.0-SNAPSHOT + 0.12.0-SNAPSHOT ../binding-parent voldemort-binding Voldemort DB Binding jar voldemort voldemort ${voldemort.version} log4j log4j 1.2.16 com.yahoo.ycsb core ${project.version} provided