diff --git a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java index 41d6f7f6..28dbbd13 100644 --- a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java +++ b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java @@ -1,371 +1,371 @@ /** * Copyright (c) 2011 YCSB++ project, 2014-2016 YCSB contributors. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.accumulo; import static java.nio.charset.StandardCharsets.UTF_8; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SortedMap; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; import org.apache.accumulo.core.client.Connector; import org.apache.accumulo.core.client.IteratorSetting; import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.TableNotFoundException; import org.apache.accumulo.core.client.ZooKeeperInstance; import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; import org.apache.accumulo.core.client.security.tokens.PasswordToken; import org.apache.accumulo.core.data.Key; import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; import org.apache.accumulo.core.iterators.user.WholeRowIterator; import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.util.CleanUp; import org.apache.hadoop.io.Text; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; /** * Accumulo binding for YCSB. 
*/ public class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance( getProperties().getProperty("accumulo.instanceName"), getProperties().getProperty("accumulo.zooKeepers")); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
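Reviewer note: the tl;dr comment in getWriter() above justifies paying for a ConcurrentHashMap in a hot path, and the get / putIfAbsent / close-the-loser idiom it describes generalizes to any expensive per-table resource. A minimal self-contained sketch of that idiom — ResourceCache and Factory are hypothetical names standing in for the writers map and connector.createBatchWriter():

```java
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the caching idiom used by getWriter(): optimistic get first,
// then putIfAbsent, then close the loser if another thread won the race.
final class ResourceCache<R extends Closeable> {
  interface Factory<R> {
    R open(String name) throws IOException;
  }

  private final ConcurrentHashMap<String, R> cache = new ConcurrentHashMap<>();
  private final Factory<R> factory;

  ResourceCache(Factory<R> factory) {
    this.factory = factory;
  }

  R get(String name) throws IOException {
    R existing = cache.get(name);        // fast path: no creation, no race
    if (existing != null) {
      return existing;
    }
    R created = factory.open(name);      // may race with other threads
    R raced = cache.putIfAbsent(name, created);
    if (raced != null) {
      created.close();                   // another thread won; discard ours
      return raced;
    }
    return created;
  }
}
```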
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry entry : scanner) { // Deserialize the row SortedMap row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
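Reviewer note: the scan above asks the tablet server to pack each row into a single serialized Key/Value pair, which the client must then unpack with WholeRowIterator.decodeRow(). A stripped-down sketch of that round trip, assuming the Accumulo 1.x client API this binding already uses:

```java
import java.io.IOException;
import java.util.Map.Entry;
import java.util.SortedMap;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.user.WholeRowIterator;

// Sketch: attach the WholeRowIterator at priority 100 so the server returns
// one serialized Key/Value per row, then re-inflate each row client-side.
final class WholeRowScan {
  static void dumpRows(Scanner scanner) throws IOException {
    scanner.addScanIterator(new IteratorSetting(100, WholeRowIterator.class));
    for (Entry<Key, Value> packed : scanner) {
      // decodeRow() restores the packed row to its original cells.
      SortedMap<Key, Value> row =
          WholeRowIterator.decodeRow(packed.getKey(), packed.getValue());
      for (Entry<Key, Value> cell : row.entrySet()) {
        System.out.println(cell.getKey().getColumnQualifier()
            + " -> " + new String(cell.getValue().get()));
      }
    }
  }
}
```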
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, - HashMap values) { + Map values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } } diff --git a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java index e651a7e6..bd3b6cf0 100644 --- a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java +++ b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java @@ -1,183 +1,183 @@ /** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. 
See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.aerospike.client.AerospikeException; import com.aerospike.client.Bin; import com.aerospike.client.Key; import com.aerospike.client.Record; import com.aerospike.client.policy.ClientPolicy; import com.aerospike.client.policy.Policy; import com.aerospike.client.policy.RecordExistsAction; import com.aerospike.client.policy.WritePolicy; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * YCSB binding for Areospike. */ public class AerospikeClient extends com.yahoo.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set fields, Vector> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, - HashMap values) { + Map values) { Bin[] bins = 
new Bin[values.size()]; int index = 0; for (Map.Entry entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, - HashMap values) { + Map values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } } diff --git a/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java b/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java index 1a9d185f..838f944a 100644 --- a/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java +++ b/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java @@ -1,466 +1,466 @@ /** * Copyright (c) 2012 - 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.arangodb.ArangoConfigure; import com.arangodb.ArangoDriver; import com.arangodb.ArangoException; import com.arangodb.ArangoHost; import com.arangodb.DocumentCursor; import com.arangodb.ErrorNums; import com.arangodb.entity.BaseDocument; import com.arangodb.entity.DocumentEntity; import com.arangodb.entity.EntityFactory; import com.arangodb.entity.TransactionEntity; import com.arangodb.util.MapBuilder; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.StringByteIterator; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * ArangoDB binding for YCSB framework using the ArangoDB Inc. driver *

* See the README.md for configuration information. *

* * @see ArangoDB Inc. * driver */ public class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * The database name to access. */ private static String databaseName = "ycsb"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private static ArangoDriver arangoDriver; private static Boolean dropDBBeforeRun; private static Boolean waitForSync = true; private static Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (ArangoDBClient.class) { if (arangoDriver != null) { return; } Properties props = getProperties(); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { ArangoConfigure arangoConfigure = new ArangoConfigure(); arangoConfigure.setArangoHost(new ArangoHost(ip, port)); arangoConfigure.init(); arangoDriver = new ArangoDriver(arangoConfigure); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDriver.deleteDatabase(databaseName); } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DATABASE_NOT_FOUND) { logger.error("Failed to delete database: {} with ex: {}", databaseName, e.toString()); System.exit(-1); } else { logger.info("Fail to delete DB, already deleted: {}", databaseName); } } } try { arangoDriver.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DUPLICATE_NAME) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); System.exit(-1); } else { logger.info("DB already exists: {}", databaseName); } } // Always set the default db arangoDriver.setDefaultDatabase(databaseName); logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. 
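Reviewer note: init() above deliberately tolerates "database not found" on delete and "duplicate name" on create, so concurrent threads and repeated runs converge on the same state. The create-if-absent half of that idiom, isolated as a sketch using only the driver calls already shown in this binding:

```java
import com.arangodb.ArangoDriver;
import com.arangodb.ArangoException;
import com.arangodb.ErrorNums;

// Sketch of the idempotent-setup idiom from init(): attempt the create, then
// use the driver's error number to tell a benign "already exists" apart from
// a genuine failure.
final class ArangoSetup {
  static void createDatabaseIfAbsent(ArangoDriver driver, String name)
      throws ArangoException {
    try {
      driver.createDatabase(name);
    } catch (ArangoException e) {
      if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DUPLICATE_NAME) {
        throw e;  // anything other than a duplicate name is a real error
      }
      // Duplicate name: another thread or a previous run created it. Fine.
    }
  }
}
```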
* * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDriver = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } arangoDriver.createDocument(table, toInsert, true/*create collection if not exist*/, waitForSync); return Status.OK; } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) { logger.error("Fail to insert: {} {} with ex {}", table, key, e.toString()); } else { logger.debug("Trying to create document with duplicate key: {} {}", table, key); return Status.BAD_REQUEST; } } catch (RuntimeException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @SuppressWarnings("unchecked") @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { DocumentEntity targetDoc = arangoDriver.getDocument(table, key, BaseDocument.class); BaseDocument aDocument = targetDoc.getEntity(); if (!this.fillMap(result, aDocument.getProperties(), fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DOCUMENT_NOT_FOUND) { logger.error("Fail to read: {} {} with ex {}", table, key, e.toString()); } else { logger.debug("Trying to read document not exist: {} {}", table, key); return Status.NOT_FOUND; } } catch (RuntimeException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. 
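Reviewer note: cleanup() above only tears down shared state when the last thread leaves, which works because every init() bumps INIT_COUNT first. A minimal sketch of the reference-counting idiom — the plain Object here is a hypothetical stand-in for the shared driver handle:

```java
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of the INIT_COUNT idiom: YCSB creates one DB instance per client
// thread, but the driver connection is shared, so only the last cleanup()
// releases it.
final class RefCountedLifecycle {
  private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
  private static Object sharedClient;          // guarded by the class lock

  static void init() {
    INIT_COUNT.incrementAndGet();
    synchronized (RefCountedLifecycle.class) {
      if (sharedClient == null) {
        sharedClient = new Object();           // open the shared handle once
      }
    }
  }

  static void cleanup() {
    if (INIT_COUNT.decrementAndGet() == 0) {   // last thread out
      synchronized (RefCountedLifecycle.class) {
        sharedClient = null;                   // release the shared handle
      }
    }
  }
}
```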
*/ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (String field : values.keySet()) { updateDoc.addAttribute(field, byteIteratorToString(values.get(field))); } arangoDriver.updateDocument(table, key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionEntity transaction = arangoDriver.createTransaction(transactionAction); transaction.addWriteCollection(table); transaction.setParams(createDocumentHandle(table, key)); arangoDriver.executeTransaction(transaction); return Status.OK; } } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DOCUMENT_NOT_FOUND) { logger.error("Fail to update: {} {} with ex {}", table, key, e.toString()); } else { logger.debug("Trying to update document not exist: {} {}", table, key); return Status.NOT_FOUND; } } catch (RuntimeException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDriver.deleteDocument(table, key); return Status.OK; } catch (ArangoException e) { if (e.getErrorNumber() != ErrorNums.ERROR_ARANGO_DOCUMENT_NOT_FOUND) { logger.error("Fail to delete: {} {} with ex {}", table, key, e.toString()); } else { logger.debug("Trying to delete document not exist: {} {}", table, key); return Status.NOT_FOUND; } } catch (RuntimeException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. 
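Reviewer note: when transactionUpdate is enabled, the update above is shipped to the server as a JavaScript function instead of going through the document API. A small sketch that assembles the same action string and prints it, to make the generated JavaScript visible; output formatting in the comment is approximate:

```java
// Sketch of the server-side transaction string the transactionUpdate path
// builds. For json = {"field0":"x"} and waitForSync = false it prints:
//   function (id) {var db = require('internal').db;
//                  db._update(id, {"field0":"x"}, true, false, false);}
final class TransactionActionDemo {
  static String buildAction(String json, boolean waitForSync) {
    return "function (id) {"
        + "var db = require('internal').db;"
        // db._update(document, data, overwrite, keepNull, waitForSync)
        + String.format("db._update(id, %s, true, false, %s);}",
            json, Boolean.toString(waitForSync));
  }

  public static void main(String[] args) {
    System.out.println(buildAction("{\"field0\":\"x\"}", false));
  }
}
```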
*/ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { DocumentCursor cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDriver.executeDocumentQuery(aqlQuery, bindVars, null, BaseDocument.class); Iterator iterator = cursor.entityIterator(); while (iterator.hasNext()) { BaseDocument aDocument = iterator.next(); HashMap aMap = new HashMap(aDocument.getProperties().size()); if (!this.fillMap(aMap, aDocument.getProperties())) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (ArangoException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collectionName, String documentKey) throws ArangoException { validateCollectionName(collectionName); return collectionName + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoException { if (name.indexOf('/') != -1) { throw new ArangoException("does not allow '/' in name."); } } private String constructReturnForAQL(Set fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map resultMap, Map properties) { return fillMap(resultMap, properties, null); } /** * Fills the map with the properties from the BaseDocument. * * @param resultMap * The map to fill/ * @param obj * The object to copy values from. * @return isSuccess */ @SuppressWarnings("unchecked") private boolean fillMap(Map resultMap, Map properties, Set fields) { if (fields == null || fields.size() == 0) { for (Map.Entry entry : properties.entrySet()) { if (entry.getValue() instanceof String) { resultMap.put(entry.getKey(), stringToByteIterator((String)(entry.getValue()))); } else { logger.error("Error! Not the format expected! Actually is {}", entry.getValue().getClass().getName()); return false; } } } else { for (String field : fields) { if (properties.get(field) instanceof String) { resultMap.put(field, stringToByteIterator((String)(properties.get(field)))); } else { logger.error("Error! Not the format expected! 
Actually is {}", properties.get(field).getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } - private String mapToJson(HashMap values) { - HashMap intervalRst = new HashMap(); + private String mapToJson(Map values) { + Map intervalRst = new HashMap(); for (Map.Entry entry : values.entrySet()) { intervalRst.put(entry.getKey(), byteIteratorToString(entry.getValue())); } return EntityFactory.toJsonString(intervalRst); } } diff --git a/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java b/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java index 4ab5b9fa..1a02624d 100644 --- a/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java +++ b/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java @@ -1,426 +1,426 @@ /** * Copyright (c) 2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.arangodb; import java.io.IOException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.arangodb.ArangoCursor; import com.arangodb.ArangoDB; import com.arangodb.ArangoDBException; import com.arangodb.entity.BaseDocument; import com.arangodb.model.DocumentCreateOptions; import com.arangodb.model.TransactionOptions; import com.arangodb.util.MapBuilder; import com.arangodb.velocypack.VPackBuilder; import com.arangodb.velocypack.VPackSlice; import com.arangodb.velocypack.ValueType; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; /** * ArangoDB binding for YCSB framework using the ArangoDB Inc. driver *

* See the README.md for configuration information. *

* * @see ArangoDB Inc. * driver */ public class ArangoDB3Client extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDB3Client.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDB3Client.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { ArangoCursor cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap aMap = new HashMap(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
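Reviewer note: scan() above builds its AQL at runtime — the start key travels as a bind variable while the optional field list becomes an inline projection object via constructReturnForAQL(). A sketch that reproduces the projection logic and prints an example query; the field names and record count are illustrative:

```java
import java.util.LinkedHashSet;
import java.util.Set;

// Sketch of the AQL that scan() generates. With fields {field0, field1},
// a record count of 10 and table "usertable" it prints a range query whose
// RETURN clause projects only the requested fields.
final class AqlScanDemo {
  static String projection(Set<String> fields, String target) {
    if (fields == null || fields.isEmpty()) {
      return target;                        // project the whole document
    }
    StringBuilder b = new StringBuilder("{");
    for (String f : fields) {
      b.append(String.format("\n\"%s\" : %s.%s,", f, target, f));
    }
    b.setCharAt(b.length() - 1, '\n');      // replace the trailing comma
    return b.append("}").toString();
  }

  public static void main(String[] args) {
    Set<String> fields = new LinkedHashSet<>();
    fields.add("field0");
    fields.add("field1");
    System.out.println(String.format(
        "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s",
        "usertable", 10, projection(fields, "target")));
  }
}
```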
* * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map resultMap, VPackSlice document, Set fields) { if (fields == null || fields.size() == 0) { for (Iterator> iterator = document.objectIterator(); iterator.hasNext();) { Entry next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } - private String mapToJson(HashMap values) { + private String mapToJson(Map values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } } diff --git a/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java index eecbee36..ac98c7d6 100644 --- a/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java +++ b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java @@ -1,412 +1,413 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import java.io.IOException; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; import org.hbase.async.Bytes; import org.hbase.async.Config; import org.hbase.async.DeleteRequest; import org.hbase.async.GetRequest; import org.hbase.async.HBaseClient; import org.hbase.async.KeyValue; import org.hbase.async.PutRequest; import org.hbase.async.Scanner; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; /** * Alternative Java client for Apache HBase. * * This client provides a subset of the main HBase client and uses a completely * asynchronous pipeline for all calls. 
It is particularly useful for write heavy * workloads. It is also compatible with all production versions of HBase. */ public class AsyncHBaseClient extends com.yahoo.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
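Reviewer note: every AsyncHBase call returns a Deferred, so the init-time table check that follows blocks on join(joinTimeout) to turn the asynchronous result into a bounded synchronous one. A minimal sketch of that idiom; requireTable is a hypothetical helper name:

```java
import org.hbase.async.HBaseClient;

// Sketch of the sync-over-async idiom used during init(): join(timeout)
// blocks the caller until the Deferred resolves or the timeout (in
// milliseconds) elapses, surfacing any failure as an exception.
final class TableCheck {
  static void requireTable(HBaseClient client, String table, long joinTimeout)
      throws Exception {
    // Fails (via the joined exception) if the table does not exist.
    client.ensureTableExists(table).join(joinTimeout);
  }
}
```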
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList row : rows) { final HashMap rowResult = new HashMap(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. 
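Reviewer note: the scan path above pages through results with nextRows(), which yields batches of rows until a null return signals that the scanner is drained. A condensed sketch of that loop — countRows is a hypothetical helper that only tallies rows:

```java
import java.util.ArrayList;

import org.hbase.async.HBaseClient;
import org.hbase.async.KeyValue;
import org.hbase.async.Scanner;

// Sketch of the AsyncHBase paging loop: each joined batch is a list of rows,
// and each row is itself a list of KeyValue cells.
final class ScanPager {
  static int countRows(HBaseClient client, byte[] table, long joinTimeout)
      throws Exception {
    final Scanner scanner = client.newScanner(table);
    int rows = 0;
    ArrayList<ArrayList<KeyValue>> batch;
    while ((batch = scanner.nextRows().join(joinTimeout)) != null) {
      rows += batch.size();                // each inner list is one row
    }
    scanner.close().join(joinTimeout);     // release server-side resources
    return rows;
  }
}
```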
*/ private byte[][] getQualifierList(final Set fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } } \ No newline at end of file diff --git a/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java b/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java index ee9e9647..48b6690b 100644 --- a/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java +++ b/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java @@ -1,257 +1,258 @@ /* * Copyright 2016 YCSB Contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License * for the specific language governing permissions and limitations under * the License. */ package com.yahoo.ycsb.db.azuredocumentdb; import com.yahoo.ycsb.*; import com.microsoft.azure.documentdb.ConnectionPolicy; import com.microsoft.azure.documentdb.ConsistencyLevel; import com.microsoft.azure.documentdb.Database; import com.microsoft.azure.documentdb.Document; import com.microsoft.azure.documentdb.DocumentClient; import com.microsoft.azure.documentdb.DocumentClientException; import com.microsoft.azure.documentdb.DocumentCollection; import com.microsoft.azure.documentdb.FeedOptions; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; import java.util.List; /** * Azure DocumentDB client binding. */ public class AzureDocumentDBClient extends DB { private static String host; private static String masterKey; private static String databaseId; private static Database database; private static DocumentClient documentClient; private static DocumentCollection collection; private static FeedOptions feedOptions; @Override public void init() throws DBException { host = getProperties().getProperty("documentdb.host", null); masterKey = getProperties().getProperty("documentdb.masterKey", null); if (host == null) { System.err.println("ERROR: 'documentdb.host' must be set!"); System.exit(1); } if (masterKey == null) { System.err.println("ERROR: 'documentdb.masterKey' must be set!"); System.exit(1); } databaseId = getProperties().getProperty("documentdb.databaseId", "ycsb"); String collectionId = getProperties().getProperty("documentdb.collectionId", "usertable"); documentClient = new DocumentClient(host, masterKey, ConnectionPolicy.GetDefault(), ConsistencyLevel.Session); try { // Initialize test database and collection. collection = getCollection(collectionId); } catch (DocumentClientException e) { throw new DBException("Initialize collection failed", e); } feedOptions = new FeedOptions(); feedOptions.setEmitVerboseTracesInQuery(false); } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Document record = getDocumentById(table, key); if (record != null) { Set fieldsToReturn = (fields == null ? record.getHashMap().keySet() : fields); for (String field : fieldsToReturn) { if (field.startsWith("_")) { continue; } result.put(field, new StringByteIterator(record.getString(field))); } return Status.OK; } // Unable to find the specified document.
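// (Illustrative note: Status.NOT_FOUND, which YCSB's Status class defines and the
// other bindings in this patch return for a missing row, would let callers tell a
// true miss apart from a failed request; this binding reports both as ERROR below.)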
return Status.ERROR; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { Document record = getDocumentById(table, key); if (record == null) { return Status.ERROR; } // Update each field. for (Entry val : values.entrySet()) { record.set(val.getKey(), val.getValue().toString()); } // Replace the document. try { documentClient.replaceDocument(record, null); } catch (DocumentClientException e) { e.printStackTrace(System.err); return Status.ERROR; } return Status.OK; } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { Document record = new Document(); record.set("id", key); for (Entry val : values.entrySet()) { record.set(val.getKey(), val.getValue().toString()); } try { documentClient.createDocument(collection.getSelfLink(), record, null, false); } catch (DocumentClientException e) { e.printStackTrace(System.err); return Status.ERROR; } return Status.OK; } @Override public Status delete(String table, String key) { Document record = getDocumentById(table, key); if (record == null) { return Status.ERROR; } try { // Delete the document by self link. documentClient.deleteDocument(record.getSelfLink(), null); } catch (DocumentClientException e) { e.printStackTrace(System.err); return Status.ERROR; } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // TODO: Implement Scan as query on primary key. return Status.NOT_IMPLEMENTED; } private Database getDatabase() { if (database == null) { // Get the database if it exists List databaseList = documentClient .queryDatabases( "SELECT * FROM root r WHERE r.id='" + databaseId + "'", null) .getQueryIterable() .toList(); if (databaseList.size() > 0) { // Cache the database object so we won't have to query for it // later to retrieve the selfLink. database = databaseList.get(0); } else { // Create the database if it doesn't exist. try { Database databaseDefinition = new Database(); databaseDefinition.setId(databaseId); database = documentClient.createDatabase(databaseDefinition, null) .getResource(); } catch (DocumentClientException e) { // TODO: Something has gone terribly wrong - the app wasn't // able to query or create the database. // Verify your connection, endpoint, and key. e.printStackTrace(System.err); } } } return database; } private DocumentCollection getCollection(String collectionId) throws DocumentClientException { if (collection == null) { // Get the collection if it exists. List collectionList = documentClient .queryCollections(getDatabase().getSelfLink(), "SELECT * FROM root r WHERE r.id='" + collectionId + "'", null) .getQueryIterable() .toList(); if (collectionList.size() > 0) { // Cache the collection object so we won't have to query for it // later to retrieve the selfLink. collection = collectionList.get(0); } else { // Create the collection if it doesn't exist. try { DocumentCollection collectionDefinition = new DocumentCollection(); collectionDefinition.setId(collectionId); collection = documentClient .createCollection(getDatabase().getSelfLink(), collectionDefinition, null) .getResource(); } catch (DocumentClientException e) { // TODO: Something has gone terribly wrong - the app wasn't // able to query or create the collection. // Verify your connection, endpoint, and key. e.printStackTrace(System.err); throw e; } } } return collection; } private Document getDocumentById(String collectionId, String id) { if (collection == null) { return null; } // Retrieve the document using the DocumentClient.
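// (The id below is spliced into the query text as a quoted literal. Assuming the
// SDK's parameterized form is available, com.microsoft.azure.documentdb.SqlQuerySpec
// with SqlParameterCollection, a sketch of a safer equivalent would be:
//   new SqlQuerySpec("SELECT * FROM root r WHERE r.id=@id",
//       new SqlParameterCollection(new SqlParameter("@id", id)))
// passed to queryDocuments in place of the raw string.)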
List documentList = documentClient .queryDocuments(collection.getSelfLink(), "SELECT * FROM root r WHERE r.id='" + id + "'", feedOptions) .getQueryIterable() .toList(); if (documentList.size() > 0) { return documentList.get(0); } return null; } } diff --git a/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java b/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java index 87b2a208..8247750c 100644 --- a/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java +++ b/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java @@ -1,279 +1,280 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.azuretablestorage; import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.table.CloudTable; import com.microsoft.azure.storage.table.CloudTableClient; import com.microsoft.azure.storage.table.DynamicTableEntity; import com.microsoft.azure.storage.table.EntityProperty; import com.microsoft.azure.storage.table.EntityResolver; import com.microsoft.azure.storage.table.TableBatchOperation; import com.microsoft.azure.storage.table.TableOperation; import com.microsoft.azure.storage.table.TableQuery; import com.microsoft.azure.storage.table.TableServiceEntity; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import java.util.Date; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * YCSB binding for Azure. * See {@code azure/README.md} for details. 
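* Connection is configured through the {@code azure.account} and {@code azure.key}
* properties; {@code azure.table} (default {@code usertable}), {@code azure.partitionkey}
* and {@code azure.batchsize} (1 to 100, default 1) control how records are stored. See
* the property constants at the top of the class.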
*/ public class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (!"https".equals(protocol) && !"http".equals(protocol)) { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set fields, - final HashMap result) { + Map result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery scanQuery = new TableQuery(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap properties = entity.getProperties(); HashMap cur = new HashMap(); for (Entry entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { return
insertOrUpdate(key, values); } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // first, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // then delete it TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read a subset of properties via a projection query instead of the full entity. */ - public Status readSubset(String key, Set fields, HashMap result) { + public Status readSubset(String key, Set fields, Map result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver> resolver = new EntityResolver>() { public HashMap resolve(String partitionkey, String rowKey, Date timeStamp, HashMap properties, String etag) { HashMap tmp = new HashMap(); for (Entry entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry entry : tmp.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } - private Status readEntity(String key, HashMap result) { + private Status readEntity(String key, Map result) { try { // retrieve the full entity for the given row key TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap properties = entity.getProperties(); for (Entry entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } - private Status insertBatch(String key, HashMap values) { + private Status insertBatch(String key, Map values) { HashMap properties = new HashMap(); for (Entry entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } - private Status insertOrUpdate(String key, HashMap values) { + private Status insertOrUpdate(String key, Map values)
{ HashMap properties = new HashMap(); for (Entry entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } } diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java index ee2dd8f5..aefd7798 100644 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java +++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java @@ -1,498 +1,498 @@ /** * Copyright (c) 2013-2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. See accompanying LICENSE file. * * Submitted by Chrisjan Matser on 10/11/2010. */ package com.yahoo.ycsb.db; import com.datastax.driver.core.Cluster; import com.datastax.driver.core.ColumnDefinitions; import com.datastax.driver.core.ConsistencyLevel; import com.datastax.driver.core.Host; import com.datastax.driver.core.HostDistance; import com.datastax.driver.core.Metadata; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Row; import com.datastax.driver.core.Session; import com.datastax.driver.core.SimpleStatement; import com.datastax.driver.core.Statement; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.querybuilder.QueryBuilder; import com.datastax.driver.core.querybuilder.Select; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; /** * Cassandra 2.x CQL client. * * See {@code cassandra2/README.md} for details. 
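* Connection properties: {@code hosts} (required) and {@code port} (default 9042);
* {@code cassandra.keyspace} defaults to {@code ycsb}, while read and write consistency
* default to {@code ONE} through {@code cassandra.readconsistencylevel} and
* {@code cassandra.writeconsistencylevel}. See the property constants below.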
* * @author cmatser */ public class CassandraCQLClient extends DB { private static Cluster cluster = null; private static Session session = null; private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.ONE; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.ONE; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = "ONE"; public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
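// The block below is a process-wide lazy singleton: every DB instance increments
// INIT_COUNT in init(), but only the first thread through the synchronized section
// builds the shared Cluster/Session; cleanup() closes both once the count drops
// back to zero.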
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); if ((username != null) && !username.isEmpty()) { cluster = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts).build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimeoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimeoutMillis)); } String readTimeoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimeoutMillis)); } Metadata metadata = cluster.getMetadata(); System.err.printf("Connected to cluster: %s\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { System.out.printf("Datacenter: %s; Host: %s; Rack: %s\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read.
* @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Statement stmt; Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = selectBuilder.from(table).where(QueryBuilder.eq(YCSB_KEY, key)) .limit(1); stmt.setConsistencyLevel(readConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } if (trace) { stmt.enableTracing(); } ResultSet rs = session.execute(stmt); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error reading key: " + key); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { Statement stmt; Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
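// token() maps each partition key to its position on the ring, so the scan walks
// rows in token order rather than lexical key order. For the default y_id key
// column, the generated CQL ends up looking roughly like (illustrative values):
//   SELECT * FROM usertable WHERE token(y_id) >= token('user100') LIMIT 50
// Note that startkey is spliced in as a quoted literal rather than bound.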
String initialStmt = stmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token('"); scanStmt.append(startkey); scanStmt.append("')"); scanStmt.append(" LIMIT "); scanStmt.append(recordcount); stmt = new SimpleStatement(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } if (trace) { stmt.enableTracing(); } ResultSet rs = session.execute(stmt); HashMap tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error scanning with startkey: " + startkey); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { // Insert and updates provide the same functionality return insert(table, key, values); } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { try { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, key); // Add fields for (Map.Entry entry : values.entrySet()) { Object value; ByteIterator byteIterator = entry.getValue(); value = byteIterator.toString(); insertStmt.value(entry.getKey(), value); } insertStmt.setConsistencyLevel(writeConsistencyLevel); if (debug) { System.out.println(insertStmt.toString()); } if (trace) { insertStmt.enableTracing(); } session.execute(insertStmt); return Status.OK; } catch (Exception e) { e.printStackTrace(); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { Statement stmt; stmt = QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, key)); stmt.setConsistencyLevel(writeConsistencyLevel); if (debug) { System.out.println(stmt.toString()); } if (trace) { stmt.enableTracing(); } session.execute(stmt); return Status.OK; } catch (Exception e) { e.printStackTrace(); System.out.println("Error deleting key: " + key); } return Status.ERROR; } } diff --git a/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java b/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java index 82c22983..9c136666 100644 --- a/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java +++ b/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java @@ -1,181 +1,181 @@ /** * Copyright (c) 2015 YCSB contributors All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import com.google.common.collect.Sets; import com.datastax.driver.core.ResultSet; import com.datastax.driver.core.Row; import com.datastax.driver.core.Session; import com.datastax.driver.core.Statement; import com.datastax.driver.core.querybuilder.Insert; import com.datastax.driver.core.querybuilder.QueryBuilder; import com.datastax.driver.core.querybuilder.Select; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import com.yahoo.ycsb.measurements.Measurements; import com.yahoo.ycsb.workloads.CoreWorkload; import org.cassandraunit.CassandraCQLUnit; import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; import org.junit.After; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; /** * Integration tests for the Cassandra client */ public class CassandraCQLClientTest { // Change the default Cassandra timeout from 10s to 120s for slow CI machines private final static long timeout = 120000L; private final static String TABLE = "usertable"; private final static String HOST = "localhost"; private final static int PORT = 9142; private final static String DEFAULT_ROW_KEY = "user1"; private CassandraCQLClient client; private Session session; @ClassRule public static CassandraCQLUnit cassandraUnit = new CassandraCQLUnit( new ClassPathCQLDataSet("ycsb.cql", "ycsb"), null, timeout); @Before public void setUp() throws Exception { session = cassandraUnit.getSession(); Properties p = new Properties(); p.setProperty("hosts", HOST); p.setProperty("port", Integer.toString(PORT)); p.setProperty("table", TABLE); Measurements.setProperties(p); final CoreWorkload workload = new 
CoreWorkload(); workload.init(p); client = new CassandraCQLClient(); client.setProperties(p); client.init(); } @After public void tearDownClient() throws Exception { if (client != null) { client.cleanup(); } client = null; } @After public void clearTable() throws Exception { // Clear the table so that each test starts fresh. final Statement truncate = QueryBuilder.truncate(TABLE); if (cassandraUnit != null) { cassandraUnit.getSession().execute(truncate); } } @Test public void testReadMissingRow() throws Exception { final HashMap result = new HashMap(); final Status status = client.read(TABLE, "Missing row", null, result); assertThat(result.size(), is(0)); assertThat(status, is(Status.NOT_FOUND)); } private void insertRow() { final String rowKey = DEFAULT_ROW_KEY; Insert insertStmt = QueryBuilder.insertInto(TABLE); insertStmt.value(CassandraCQLClient.YCSB_KEY, rowKey); insertStmt.value("field0", "value1"); insertStmt.value("field1", "value2"); session.execute(insertStmt); } @Test public void testRead() throws Exception { insertRow(); final HashMap result = new HashMap(); final Status status = client.read(TABLE, DEFAULT_ROW_KEY, null, result); assertThat(status, is(Status.OK)); assertThat(result.entrySet(), hasSize(11)); assertThat(result, hasEntry("field2", null)); final HashMap strResult = new HashMap(); for (final Map.Entry e : result.entrySet()) { if (e.getValue() != null) { strResult.put(e.getKey(), e.getValue().toString()); } } assertThat(strResult, hasEntry(CassandraCQLClient.YCSB_KEY, DEFAULT_ROW_KEY)); assertThat(strResult, hasEntry("field0", "value1")); assertThat(strResult, hasEntry("field1", "value2")); } @Test public void testReadSingleColumn() throws Exception { insertRow(); final HashMap result = new HashMap(); final Set fields = Sets.newHashSet("field1"); final Status status = client.read(TABLE, DEFAULT_ROW_KEY, fields, result); assertThat(status, is(Status.OK)); assertThat(result.entrySet(), hasSize(1)); final Map strResult = StringByteIterator.getStringMap(result); assertThat(strResult, hasEntry("field1", "value2")); } @Test public void testUpdate() throws Exception { final String key = "key"; - final HashMap input = new HashMap(); + final Map input = new HashMap(); input.put("field0", "value1"); input.put("field1", "value2"); final Status status = client.insert(TABLE, key, StringByteIterator.getByteIteratorMap(input)); assertThat(status, is(Status.OK)); // Verify result final Select selectStmt = QueryBuilder.select("field0", "field1") .from(TABLE) .where(QueryBuilder.eq(CassandraCQLClient.YCSB_KEY, key)) .limit(1); final ResultSet rs = session.execute(selectStmt); final Row row = rs.one(); assertThat(row, notNullValue()); assertThat(rs.isExhausted(), is(true)); assertThat(row.getString("field0"), is("value1")); assertThat(row.getString("field1"), is("value2")); } } diff --git a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java index 1822c556..e909e813 100644 --- a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java +++ b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java @@ -1,397 +1,397 @@ /** * Copyright (c) 2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. 
You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.cloudspanner; import com.google.common.base.Joiner; import com.google.cloud.spanner.DatabaseId; import com.google.cloud.spanner.DatabaseClient; import com.google.cloud.spanner.Key; import com.google.cloud.spanner.KeySet; import com.google.cloud.spanner.KeyRange; import com.google.cloud.spanner.Mutation; import com.google.cloud.spanner.Options; import com.google.cloud.spanner.ResultSet; import com.google.cloud.spanner.SessionPoolOptions; import com.google.cloud.spanner.Spanner; import com.google.cloud.spanner.SpannerOptions; import com.google.cloud.spanner.Statement; import com.google.cloud.spanner.Struct; import com.google.cloud.spanner.StructReader; import com.google.cloud.spanner.TimestampBound; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.Client; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import com.yahoo.ycsb.workloads.CoreWorkload; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.logging.Level; import java.util.logging.Logger; import java.util.concurrent.TimeUnit; /** * YCSB Client for Google's Cloud Spanner. */ public class CloudSpannerClient extends DB { /** * The names of properties which can be specified in the config files and flags. */ public static final class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. 
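* For example, {@code -p cloudspanner.channels=8} on the YCSB command line would
* override it (an illustrative value, not a tuning recommendation).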
*/ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. private final ArrayList bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add("field" + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( - String table, String key, Set fields, HashMap result) { + String table, String key, Set fields, Map result) { Statement query; Iterable columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( - String table, String key, Set fields, HashMap result) { + String table, String key, Set fields, Map result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set fields, Vector> result) { Iterable columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set fields, Vector> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } try { dbClient.writeAtLeastOnce(Arrays.asList(m.build())); } catch (Exception e) { LOGGER.log(Level.INFO, "update()", e); return Status.ERROR; } return Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (bufferedMutations.size() < batchInserts) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } bufferedMutations.add(m.build()); } else { LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key + " is ignored. Is this a retry?"); } if (bufferedMutations.size() < batchInserts) { return Status.BATCHED_OK; } try { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } catch (Exception e) { LOGGER.log(Level.INFO, "insert()", e); return Status.ERROR; } return Status.OK; } @Override public void cleanup() { try { if (bufferedMutations.size() > 0) { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } } catch (Exception e) { LOGGER.log(Level.INFO, "cleanup()", e); } } @Override public Status delete(String table, String key) { try { dbClient.writeAtLeastOnce(Arrays.asList(Mutation.delete(table, Key.of(key)))); } catch (Exception e) { LOGGER.log(Level.INFO, "delete()", e); return Status.ERROR; } return Status.OK; } private static void decodeStruct( - Iterable columns, StructReader structReader, HashMap result) { + Iterable columns, StructReader structReader, Map result) { for (String col : columns) { result.put(col, new StringByteIterator(structReader.getString(col))); } } } diff --git a/core/src/main/java/com/yahoo/ycsb/BasicDB.java b/core/src/main/java/com/yahoo/ycsb/BasicDB.java index dab23a01..dfb88a0c 100644 --- a/core/src/main/java/com/yahoo/ycsb/BasicDB.java +++ b/core/src/main/java/com/yahoo/ycsb/BasicDB.java @@ -1,269 +1,270 @@ /** * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import java.util.*; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; /** * Basic DB that just prints out the requested operations, instead of doing them against a database. */ public class BasicDB extends DB { public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; private boolean verbose; private boolean randomizedelay; private int todelay; public BasicDB() { todelay = 0; } private void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(Utils.random().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ @SuppressWarnings("unchecked") public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } } protected static final ThreadLocal TL_STRING_BUILDER = new ThreadLocal() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append(""); } sb.append("]"); System.out.println(sb); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append(""); } sb.append("]"); System.out.println(sb); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } return Status.OK; } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap fields = new HashMap(); fields.put("A", "X"); fields.put("B", "Y"); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap(); fields.put("C", "Z"); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ } diff --git a/core/src/main/java/com/yahoo/ycsb/CommandLine.java b/core/src/main/java/com/yahoo/ycsb/CommandLine.java index 9e95bd5a..ff781f41 100644 --- a/core/src/main/java/com/yahoo/ycsb/CommandLine.java +++ b/core/src/main/java/com/yahoo/ycsb/CommandLine.java @@ -1,349 +1,349 @@ /** * Copyright (c) 2010 Yahoo! Inc. All rights reserved. *
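A note on BasicDB's delay() loop above: it parks the thread until a computed deadline instead of sleeping once, because LockSupport.parkNanos may return early. The following is a minimal, self-contained sketch of that same pattern; the class and method names are illustrative and not part of YCSB.

import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

// Illustrative sketch of BasicDB's deadline-based delay: park repeatedly
// until the deadline passes, since parkNanos may wake up early.
public final class DelaySketch {
  private static final Random RANDOM = new Random();

  // delayMs <= 0 disables the pause, mirroring BasicDB's todelay guard.
  static void simulateDelay(int delayMs, boolean randomize) {
    if (delayMs <= 0) {
      return;
    }
    long delayNs = randomize
        ? TimeUnit.MILLISECONDS.toNanos(RANDOM.nextInt(delayMs))
        : TimeUnit.MILLISECONDS.toNanos(delayMs);
    if (delayNs == 0) {
      return; // randomized draw of zero means no pause, as in BasicDB
    }
    final long deadline = System.nanoTime() + delayNs;
    do {
      LockSupport.parkNanos(deadline - System.nanoTime());
    } while (System.nanoTime() < deadline && !Thread.interrupted());
  }

  public static void main(String[] args) {
    long start = System.nanoTime();
    simulateDelay(10, false); // pause roughly 10 ms
    System.out.println("waited ~"
        + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) + " ms");
  }
}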

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import com.yahoo.ycsb.workloads.CoreWorkload; import java.io.BufferedReader; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.util.*; /** * A simple command line client to a database, using the appropriate com.yahoo.ycsb.DB implementation. */ public final class CommandLine { private CommandLine() { //not used } public static final String DEFAULT_DB = "com.yahoo.ycsb.BasicDB"; public static void usageMessage() { System.out.println("YCSB Command Line Client"); System.out.println("Usage: java com.yahoo.ycsb.CommandLine [options]"); System.out.println("Options:"); System.out.println(" -P filename: Specify a property file"); System.out.println(" -p name=value: Specify a property value"); System.out.println(" -db classname: Use a specified DB class (can also set the \"db\" property)"); System.out.println(" -table tablename: Use the table name instead of the default \"" + CoreWorkload.TABLENAME_PROPERTY_DEFAULT + "\""); System.out.println(); } public static void help() { System.out.println("Commands:"); System.out.println(" read key [field1 field2 ...] - Read a record"); System.out.println(" scan key recordcount [field1 field2 ...] - Scan starting at key"); System.out.println(" insert key name1=value1 [name2=value2 ...] - Insert a new record"); System.out.println(" update key name1=value1 [name2=value2 ...] - Update a record"); System.out.println(" delete key - Delete a record"); System.out.println(" table [tablename] - Get or [set] the name of the table"); System.out.println(" quit - Quit"); } public static void main(String[] args) { Properties props = new Properties(); Properties fileprops = new Properties(); parseArguments(args, props, fileprops); for (Enumeration e = props.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, props.getProperty(prop)); } props = fileprops; System.out.println("YCSB Command Line client"); System.out.println("Type \"help\" for command line help"); System.out.println("Start with \"-help\" for usage info"); String table = props.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); //create a DB String dbname = props.getProperty(Client.DB_PROPERTY, DEFAULT_DB); ClassLoader classLoader = CommandLine.class.getClassLoader(); DB db = null; try { Class dbclass = classLoader.loadClass(dbname); db = (DB) dbclass.newInstance(); } catch (Exception e) { e.printStackTrace(); System.exit(0); } db.setProperties(props); try { db.init(); } catch (DBException e) { e.printStackTrace(); System.exit(0); } System.out.println("Connected."); //main loop BufferedReader br = new BufferedReader(new InputStreamReader(System.in)); for (;;) { //get user input System.out.print("> "); String input = null; try { input = br.readLine(); } catch (IOException e) { e.printStackTrace(); System.exit(1); } if (input.compareTo("") == 0) { continue; } if (input.compareTo("help") == 0) { help(); continue; } if (input.compareTo("quit") == 0) { break; } String[] tokens = input.split(" "); long st = System.currentTimeMillis(); //handle commands if (tokens[0].compareTo("table") == 0) { 
handleTable(tokens, table); } else if (tokens[0].compareTo("read") == 0) { handleRead(tokens, table, db); } else if (tokens[0].compareTo("scan") == 0) { handleScan(tokens, table, db); } else if (tokens[0].compareTo("update") == 0) { handleUpdate(tokens, table, db); } else if (tokens[0].compareTo("insert") == 0) { handleInsert(tokens, table, db); } else if (tokens[0].compareTo("delete") == 0) { handleDelete(tokens, table, db); } else { System.out.println("Error: unknown command \"" + tokens[0] + "\""); } System.out.println((System.currentTimeMillis() - st) + " ms"); } } private static void parseArguments(String[] args, Properties props, Properties fileprops) { int argindex = 0; while ((argindex < args.length) && (args[argindex].startsWith("-"))) { if ((args[argindex].compareTo("-help") == 0) || (args[argindex].compareTo("--help") == 0) || (args[argindex].compareTo("-?") == 0) || (args[argindex].compareTo("--?") == 0)) { usageMessage(); System.exit(0); } if (args[argindex].compareTo("-db") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } props.setProperty(Client.DB_PROPERTY, args[argindex]); argindex++; } else if (args[argindex].compareTo("-P") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } String propfile = args[argindex]; argindex++; Properties myfileprops = new Properties(); try { myfileprops.load(new FileInputStream(propfile)); } catch (IOException e) { System.out.println(e.getMessage()); System.exit(0); } for (Enumeration e = myfileprops.propertyNames(); e.hasMoreElements();) { String prop = (String) e.nextElement(); fileprops.setProperty(prop, myfileprops.getProperty(prop)); } } else if (args[argindex].compareTo("-p") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } int eq = args[argindex].indexOf('='); if (eq < 0) { usageMessage(); System.exit(0); } String name = args[argindex].substring(0, eq); String value = args[argindex].substring(eq + 1); props.put(name, value); argindex++; } else if (args[argindex].compareTo("-table") == 0) { argindex++; if (argindex >= args.length) { usageMessage(); System.exit(0); } props.put(CoreWorkload.TABLENAME_PROPERTY, args[argindex]); argindex++; } else { System.out.println("Unknown option " + args[argindex]); usageMessage(); System.exit(0); } if (argindex >= args.length) { break; } } if (argindex != args.length) { usageMessage(); System.exit(0); } } private static void handleDelete(String[] tokens, String table, DB db) { if (tokens.length != 2) { System.out.println("Error: syntax is \"delete keyname\""); } else { Status ret = db.delete(table, tokens[1]); System.out.println("Return result: " + ret.getName()); } } private static void handleInsert(String[] tokens, String table, DB db) { if (tokens.length < 3) { System.out.println("Error: syntax is \"insert keyname name1=value1 [name2=value2 ...]\""); } else { HashMap values = new HashMap<>(); for (int i = 2; i < tokens.length; i++) { String[] nv = tokens[i].split("="); values.put(nv[0], new StringByteIterator(nv[1])); } Status ret = db.insert(table, tokens[1], values); System.out.println("Result: " + ret.getName()); } } private static void handleUpdate(String[] tokens, String table, DB db) { if (tokens.length < 3) { System.out.println("Error: syntax is \"update keyname name1=value1 [name2=value2 ...]\""); } else { HashMap values = new HashMap<>(); for (int i = 2; i < tokens.length; i++) { String[] nv = tokens[i].split("="); values.put(nv[0], new StringByteIterator(nv[1])); } Status ret = 
db.update(table, tokens[1], values); System.out.println("Result: " + ret.getName()); } } private static void handleScan(String[] tokens, String table, DB db) { if (tokens.length < 3) { System.out.println("Error: syntax is \"scan keyname scanlength [field1 field2 ...]\""); } else { Set fields = null; if (tokens.length > 3) { fields = new HashSet<>(); fields.addAll(Arrays.asList(tokens).subList(3, tokens.length)); } Vector> results = new Vector<>(); Status ret = db.scan(table, tokens[1], Integer.parseInt(tokens[2]), fields, results); System.out.println("Result: " + ret.getName()); int record = 0; if (results.isEmpty()) { System.out.println("0 records"); } else { System.out.println("--------------------------------"); } - for (HashMap result : results) { + for (Map result : results) { System.out.println("Record " + (record++)); for (Map.Entry ent : result.entrySet()) { System.out.println(ent.getKey() + "=" + ent.getValue()); } System.out.println("--------------------------------"); } } } private static void handleRead(String[] tokens, String table, DB db) { if (tokens.length == 1) { System.out.println("Error: syntax is \"read keyname [field1 field2 ...]\""); } else { Set fields = null; if (tokens.length > 2) { fields = new HashSet<>(); fields.addAll(Arrays.asList(tokens).subList(2, tokens.length)); } HashMap result = new HashMap<>(); Status ret = db.read(table, tokens[1], fields, result); System.out.println("Return code: " + ret.getName()); for (Map.Entry ent : result.entrySet()) { System.out.println(ent.getKey() + "=" + ent.getValue()); } } } private static void handleTable(String[] tokens, String table) { if (tokens.length == 1) { System.out.println("Using table \"" + table + "\""); } else if (tokens.length == 2) { table = tokens[1]; System.out.println("Using table \"" + table + "\""); } else { System.out.println("Error: syntax is \"table tablename\""); } } } diff --git a/core/src/main/java/com/yahoo/ycsb/DB.java b/core/src/main/java/com/yahoo/ycsb/DB.java index 2002474b..b93fd483 100644 --- a/core/src/main/java/com/yahoo/ycsb/DB.java +++ b/core/src/main/java/com/yahoo/ycsb/DB.java @@ -1,134 +1,135 @@ /** * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved. *
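For readers who want the programmatic equivalent of a CommandLine session, here is a sketch of what typing "insert user1 name=alice" followed by "read user1" does through the patched Map-based API. It assumes the usual Map<String, ByteIterator> type parameters, which the plain-text rendering of this patch has stripped, and it uses BasicDB so that no running database is required.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import com.yahoo.ycsb.BasicDB;
import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.Status;
import com.yahoo.ycsb.StringByteIterator;

// Sketch: the programmatic equivalent of one CommandLine insert/read cycle.
public final class CommandLineSketch {
  public static void main(String[] args) throws Exception {
    DB db = new BasicDB();
    db.setProperties(new Properties());
    db.init();

    // "insert user1 name=alice": values are parsed into ByteIterators.
    Map<String, ByteIterator> values = new HashMap<>();
    values.put("name", new StringByteIterator("alice"));
    Status ret = db.insert("usertable", "user1", values);
    System.out.println("insert: " + ret.getName());

    // "read user1": a null field set means "all fields".
    Map<String, ByteIterator> result = new HashMap<>();
    ret = db.read("usertable", "user1", null, result);
    System.out.println("read: " + ret.getName());

    db.cleanup();
  }
}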

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * A layer for accessing a database to be benchmarked. Each thread in the client * will be given its own instance of whatever DB class is to be used in the test. * This class should be constructed using a no-argument constructor, so we can * load it dynamically. Any argument-based initialization should be * done by init(). * * Note that YCSB does not make any use of the return codes returned by this class. * Instead, it keeps a count of the return values and presents them to the user. * * The semantics of methods such as insert, update and delete vary from database * to database. In particular, operations may or may not be durable once these * methods commit, and some systems may return 'success' regardless of whether * or not a tuple with a matching key existed before the call. Rather than dictate * the exact semantics of these methods, we recommend you either implement them * to match the database's default semantics, or the semantics of your * target application. For the sake of comparison between experiments we also * recommend you explain the semantics you chose when presenting performance results. */ public abstract class DB { /** * Properties for configuring this DB. */ private Properties properties = new Properties(); /** * Set the properties for this DB. */ public void setProperties(Properties p) { properties = p; } /** * Get the set of properties for this DB. */ public Properties getProperties() { return properties; } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() throws DBException { } /** * Cleanup any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void cleanup() throws DBException { } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return The result of the operation. */ - public abstract Status read(String table, String key, Set fields, HashMap result); + public abstract Status read(String table, String key, Set fields, Map result); /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return The result of the operation. */ public abstract Status scan(String table, String startkey, int recordcount, Set fields, Vector> result); /** * Update a record in the database. 
Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return The result of the operation. */ - public abstract Status update(String table, String key, HashMap values); + public abstract Status update(String table, String key, Map values); /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return The result of the operation. */ - public abstract Status insert(String table, String key, HashMap values); + public abstract Status insert(String table, String key, Map values); /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return The result of the operation. */ public abstract Status delete(String table, String key); } diff --git a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java index 880cccc1..a15664e8 100644 --- a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java +++ b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java @@ -1,246 +1,247 @@ /** * Copyright (c) 2010 Yahoo! Inc., 2016-2017 YCSB contributors. All rights reserved. *
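The visible effect of this change for binding authors: read, update, and insert now accept any Map implementation, while scan still takes a Vector of HashMaps. A minimal no-op binding compiled against the widened signatures might look like the sketch below; the generic parameters are assumed from the YCSB API, since they are stripped in the patch text above.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;

import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.Status;

// Hypothetical binding showing the post-patch signatures: parameters are
// declared as Map, not HashMap, so a TreeMap or an unmodifiable map passed
// by the YCSB core would now be accepted.
public class NoOpDB extends DB {
  @Override
  public Status read(String table, String key, Set<String> fields,
                     Map<String, ByteIterator> result) {
    return Status.OK;
  }

  @Override
  public Status scan(String table, String startkey, int recordcount,
                     Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
    return Status.OK; // scan is unchanged by this patch
  }

  @Override
  public Status update(String table, String key, Map<String, ByteIterator> values) {
    return Status.OK;
  }

  @Override
  public Status insert(String table, String key, Map<String, ByteIterator> values) {
    return Status.OK;
  }

  @Override
  public Status delete(String table, String key) {
    return Status.OK;
  }
}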

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; +import java.util.Map; import com.yahoo.ycsb.measurements.Measurements; import org.apache.htrace.core.TraceScope; import org.apache.htrace.core.Tracer; import java.util.*; /** * Wrapper around a "real" DB that measures latencies and counts return codes. * Also reports latency separately between OK and failed operations. */ public class DBWrapper extends DB { private final DB db; private final Measurements measurements; private final Tracer tracer; private boolean reportLatencyForEachError = false; - private HashSet latencyTrackedErrors = new HashSet(); + private Set latencyTrackedErrors = new HashSet(); private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY = "reportlatencyforeacherror"; private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT = "false"; private static final String LATENCY_TRACKED_ERRORS_PROPERTY = "latencytrackederrors"; private final String scopeStringCleanup; private final String scopeStringDelete; private final String scopeStringInit; private final String scopeStringInsert; private final String scopeStringRead; private final String scopeStringScan; private final String scopeStringUpdate; public DBWrapper(final DB db, final Tracer tracer) { this.db = db; measurements = Measurements.getMeasurements(); this.tracer = tracer; final String simple = db.getClass().getSimpleName(); scopeStringCleanup = simple + "#cleanup"; scopeStringDelete = simple + "#delete"; scopeStringInit = simple + "#init"; scopeStringInsert = simple + "#insert"; scopeStringRead = simple + "#read"; scopeStringScan = simple + "#scan"; scopeStringUpdate = simple + "#update"; } /** * Set the properties for this DB. */ public void setProperties(Properties p) { db.setProperties(p); } /** * Get the set of properties for this DB. */ public Properties getProperties() { return db.getProperties(); } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() throws DBException { try (final TraceScope span = tracer.newScope(scopeStringInit)) { db.init(); this.reportLatencyForEachError = Boolean.parseBoolean(getProperties(). getProperty(REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY, REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT)); if (!reportLatencyForEachError) { String latencyTrackedErrorsProperty = getProperties().getProperty(LATENCY_TRACKED_ERRORS_PROPERTY, null); if (latencyTrackedErrorsProperty != null) { this.latencyTrackedErrors = new HashSet(Arrays.asList( latencyTrackedErrorsProperty.split(","))); } } System.err.println("DBWrapper: report latency for each error is " + this.reportLatencyForEachError + " and specific error codes to track" + " for latency are: " + this.latencyTrackedErrors.toString()); } } /** * Cleanup any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void cleanup() throws DBException { try (final TraceScope span = tracer.newScope(scopeStringCleanup)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); db.cleanup(); long en = System.nanoTime(); measure("CLEANUP", Status.OK, ist, st, en); } } /** * Read a record from the database. 
Each field/value pair from the result * will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return The result of the operation. */ public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try (final TraceScope span = tracer.newScope(scopeStringRead)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); Status res = db.read(table, key, fields, result); long en = System.nanoTime(); measure("READ", res, ist, st, en); measurements.reportStatus("READ", res); return res; } } /** * Perform a range scan for a set of records in the database. * Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return The result of the operation. */ public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try (final TraceScope span = tracer.newScope(scopeStringScan)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); Status res = db.scan(table, startkey, recordcount, fields, result); long en = System.nanoTime(); measure("SCAN", res, ist, st, en); measurements.reportStatus("SCAN", res); return res; } } private void measure(String op, Status result, long intendedStartTimeNanos, long startTimeNanos, long endTimeNanos) { String measurementName = op; if (result == null || !result.isOk()) { if (this.reportLatencyForEachError || this.latencyTrackedErrors.contains(result.getName())) { measurementName = op + "-" + result.getName(); } else { measurementName = op + "-FAILED"; } } measurements.measure(measurementName, (int) ((endTimeNanos - startTimeNanos) / 1000)); measurements.measureIntended(measurementName, (int) ((endTimeNanos - intendedStartTimeNanos) / 1000)); } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return The result of the operation. */ public Status update(String table, String key, - HashMap values) { + Map values) { try (final TraceScope span = tracer.newScope(scopeStringUpdate)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); Status res = db.update(table, key, values); long en = System.nanoTime(); measure("UPDATE", res, ist, st, en); measurements.reportStatus("UPDATE", res); return res; } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified * record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return The result of the operation. 
*/ public Status insert(String table, String key, - HashMap values) { + Map values) { try (final TraceScope span = tracer.newScope(scopeStringInsert)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); Status res = db.insert(table, key, values); long en = System.nanoTime(); measure("INSERT", res, ist, st, en); measurements.reportStatus("INSERT", res); return res; } } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return The result of the operation. */ public Status delete(String table, String key) { try (final TraceScope span = tracer.newScope(scopeStringDelete)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); Status res = db.delete(table, key); long en = System.nanoTime(); measure("DELETE", res, ist, st, en); measurements.reportStatus("DELETE", res); return res; } } } diff --git a/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java b/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java index db4ba3fd..1cbf3a5d 100644 --- a/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java +++ b/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java @@ -1,159 +1,160 @@ /** * Copyright (c) 2010 Yahoo! Inc. All rights reserved. *
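DBWrapper's pattern is worth seeing in isolation: each operation is bracketed by System.nanoTime() calls, and latency is reported twice, once from the actual start and once from the intended start, which corrects for coordinated omission. Below is a stand-alone sketch of that bracketing; recordMicros is a stand-in for the Measurements singleton, not a YCSB API.

import java.util.concurrent.TimeUnit;

// Sketch of DBWrapper's measurement bracketing.
public final class MeasureSketch {
  interface Op { boolean run(); }

  static boolean timed(String name, long intendedStartNs, Op op) {
    long st = System.nanoTime();
    boolean ok = op.run();
    long en = System.nanoTime();
    // Mirror DBWrapper's error suffixing for failed operations.
    String metric = ok ? name : name + "-FAILED";
    recordMicros(metric, (en - st) / 1000);                           // actual latency
    recordMicros("Intended-" + metric, (en - intendedStartNs) / 1000); // intended latency
    return ok;
  }

  static void recordMicros(String metric, long micros) {
    System.out.println(metric + ": " + micros + " us");
  }

  public static void main(String[] args) {
    // In YCSB the intended start comes from the throttle schedule; here we
    // just use "now" for illustration.
    long intended = System.nanoTime();
    timed("READ", intended, () -> {
      try {
        TimeUnit.MILLISECONDS.sleep(2);
      } catch (InterruptedException e) {
        return false;
      }
      return true;
    });
  }
}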

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import java.util.HashMap; +import java.util.Map; import java.util.Random; import java.util.Set; import java.util.Vector; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.LockSupport; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import static java.util.concurrent.TimeUnit.MICROSECONDS; /** * Basic DB that just prints out the requested operations, instead of doing them against a database. */ public class GoodBadUglyDB extends DB { public static final String SIMULATE_DELAY = "gbudb.delays"; public static final String SIMULATE_DELAY_DEFAULT = "200,1000,10000,50000,100000"; private static final ReadWriteLock DB_ACCESS = new ReentrantReadWriteLock(); private long[] delays; public GoodBadUglyDB() { delays = new long[]{200, 1000, 10000, 50000, 200000}; } private void delay() { final Random random = Utils.random(); double p = random.nextDouble(); int mod; if (p < 0.9) { mod = 0; } else if (p < 0.99) { mod = 1; } else if (p < 0.9999) { mod = 2; } else { mod = 3; } // this will make mod 3 pauses global Lock lock = mod == 3 ? DB_ACCESS.writeLock() : DB_ACCESS.readLock(); if (mod == 3) { System.out.println("OUCH"); } lock.lock(); try { final long baseDelayNs = MICROSECONDS.toNanos(delays[mod]); final int delayRangeNs = (int) (MICROSECONDS.toNanos(delays[mod + 1]) - baseDelayNs); final long delayNs = baseDelayNs + random.nextInt(delayRangeNs); final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } finally { lock.unlock(); } } /** * Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread. */ public void init() { int i = 0; for (String delay : getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT).split(",")) { delays[i++] = Long.parseLong(delay); } } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { delay(); return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { delay(); return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { delay(); return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { delay(); return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); return Status.OK; } } diff --git a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java index 13a79600..82235491 100644 --- a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java @@ -1,121 +1,121 @@ /** * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors All rights reserved. *
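Despite its copied Javadoc, GoodBadUglyDB does not just print operations: it draws one uniform random number per call and maps it to four latency tiers (roughly 90% / 9% / 0.99% / 0.01%), with the last tier taking a global write lock so that every thread stalls. Here is a sketch of the tier selection and the delay draw, using the class's documented default delays; names are illustrative.

import java.util.Random;

import static java.util.concurrent.TimeUnit.MICROSECONDS;

// Sketch of GoodBadUglyDB's tier selection. Default tiers in microseconds
// per the SIMULATE_DELAY_DEFAULT property: 200, 1000, 10000, 50000, 100000.
// A delay is drawn uniformly between delays[mod] and delays[mod + 1].
public final class TierSketch {
  private static final long[] DELAYS_US = {200, 1000, 10000, 50000, 100000};
  private static final Random RANDOM = new Random();

  static int pickTier(double p) {
    if (p < 0.9) {
      return 0;   // good: ~90% of ops
    } else if (p < 0.99) {
      return 1;   // bad: ~9%
    } else if (p < 0.9999) {
      return 2;   // worse: ~0.99%
    }
    return 3;     // ugly: ~0.01%, globally serialized in the real class
  }

  static long drawDelayNs(int mod) {
    long baseNs = MICROSECONDS.toNanos(DELAYS_US[mod]);
    int rangeNs = (int) (MICROSECONDS.toNanos(DELAYS_US[mod + 1]) - baseNs);
    return baseNs + RANDOM.nextInt(rangeNs);
  }

  public static void main(String[] args) {
    int tier = pickTier(RANDOM.nextDouble());
    System.out.println("tier " + tier + ", delay "
        + drawDelayNs(tier) / 1000 + " us");
  }
}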

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb; import java.util.HashMap; import java.util.Map; /** * A ByteIterator that iterates through a string. */ public class StringByteIterator extends ByteIterator { private String str; private int off; /** * Put all of the entries of one map into the other, converting * String values into ByteIterators. */ public static void putAllAsByteIterators(Map out, Map in) { for (Map.Entry entry : in.entrySet()) { out.put(entry.getKey(), new StringByteIterator(entry.getValue())); } } /** * Put all of the entries of one map into the other, converting * ByteIterator values into Strings. */ public static void putAllAsStrings(Map out, Map in) { for (Map.Entry entry : in.entrySet()) { out.put(entry.getKey(), entry.getValue().toString()); } } /** * Create a copy of a map, converting the values from Strings to * StringByteIterators. */ - public static HashMap getByteIteratorMap(Map m) { + public static Map getByteIteratorMap(Map m) { HashMap ret = new HashMap(); for (Map.Entry entry : m.entrySet()) { ret.put(entry.getKey(), new StringByteIterator(entry.getValue())); } return ret; } /** * Create a copy of a map, converting the values from * StringByteIterators to Strings. */ - public static HashMap getStringMap(Map m) { + public static Map getStringMap(Map m) { HashMap ret = new HashMap(); for (Map.Entry entry : m.entrySet()) { ret.put(entry.getKey(), entry.getValue().toString()); } return ret; } public StringByteIterator(String s) { this.str = s; this.off = 0; } @Override public boolean hasNext() { return off < str.length(); } @Override public byte nextByte() { byte ret = (byte) str.charAt(off); off++; return ret; } @Override public long bytesLeft() { return str.length() - off; } @Override public void reset() { off = 0; } /** * Specialization of general purpose toString() to avoid unnecessary * copies. *

* Creating a new StringByteIterator, then calling toString() * yields the original String object, and does not perform any copies * or String conversion operations. *

*/ @Override public String toString() { if (off > 0) { return super.toString(); } else { return str; } } } diff --git a/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java b/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java index 4a829202..7a0f1be2 100644 --- a/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java +++ b/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java @@ -1,357 +1,357 @@ /** * Copyright (c) 2013 - 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.couchbase.client.protocol.views.*; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import net.spy.memcached.PersistTo; import net.spy.memcached.ReplicateTo; import net.spy.memcached.internal.OperationFuture; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.StringWriter; import java.io.Writer; import java.net.URI; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * A class that wraps the CouchbaseClient to allow it to be interfaced with YCSB. * This class extends {@link DB} and implements the database interface used by YCSB client. 
*/ public class CouchbaseClient extends DB { public static final String URL_PROPERTY = "couchbase.url"; public static final String BUCKET_PROPERTY = "couchbase.bucket"; public static final String PASSWORD_PROPERTY = "couchbase.password"; public static final String CHECKF_PROPERTY = "couchbase.checkFutures"; public static final String PERSIST_PROPERTY = "couchbase.persistTo"; public static final String REPLICATE_PROPERTY = "couchbase.replicateTo"; public static final String JSON_PROPERTY = "couchbase.json"; public static final String DESIGN_DOC_PROPERTY = "couchbase.ddoc"; public static final String VIEW_PROPERTY = "couchbase.view"; public static final String STALE_PROPERTY = "couchbase.stale"; public static final String SCAN_PROPERTY = "scanproportion"; public static final String STALE_PROPERTY_DEFAULT = Stale.OK.name(); public static final String SCAN_PROPERTY_DEFAULT = "0.0"; protected static final ObjectMapper JSON_MAPPER = new ObjectMapper(); private com.couchbase.client.CouchbaseClient client; private PersistTo persistTo; private ReplicateTo replicateTo; private boolean checkFutures; private boolean useJson; private String designDoc; private String viewName; private Stale stale; private View view; private final Logger log = LoggerFactory.getLogger(getClass()); @Override public void init() throws DBException { Properties props = getProperties(); String url = props.getProperty(URL_PROPERTY, "http://127.0.0.1:8091/pools"); String bucket = props.getProperty(BUCKET_PROPERTY, "default"); String password = props.getProperty(PASSWORD_PROPERTY, ""); checkFutures = props.getProperty(CHECKF_PROPERTY, "true").equals("true"); useJson = props.getProperty(JSON_PROPERTY, "true").equals("true"); persistTo = parsePersistTo(props.getProperty(PERSIST_PROPERTY, "0")); replicateTo = parseReplicateTo(props.getProperty(REPLICATE_PROPERTY, "0")); designDoc = getProperties().getProperty(DESIGN_DOC_PROPERTY); viewName = getProperties().getProperty(VIEW_PROPERTY); stale = Stale.valueOf(getProperties().getProperty(STALE_PROPERTY, STALE_PROPERTY_DEFAULT).toUpperCase()); Double scanproportion = Double.valueOf(props.getProperty(SCAN_PROPERTY, SCAN_PROPERTY_DEFAULT)); Properties systemProperties = System.getProperties(); systemProperties.put("net.spy.log.LoggerImpl", "net.spy.memcached.compat.log.SLF4JLogger"); System.setProperties(systemProperties); try { client = new com.couchbase.client.CouchbaseClient(Arrays.asList(new URI(url)), bucket, password); } catch (Exception e) { throw new DBException("Could not create CouchbaseClient object.", e); } if (scanproportion > 0) { try { view = client.getView(designDoc, viewName); } catch (Exception e) { throw new DBException(String.format("%s=%s and %s=%s provided, unable to connect to view.", DESIGN_DOC_PROPERTY, designDoc, VIEW_PROPERTY, viewName), e.getCause()); } } } /** * Parse the replicate property into the correct enum. * * @param property the stringified property value. * @throws DBException if parsing the property did fail. * @return the correct enum. */ private ReplicateTo parseReplicateTo(final String property) throws DBException { int value = Integer.parseInt(property); switch (value) { case 0: return ReplicateTo.ZERO; case 1: return ReplicateTo.ONE; case 2: return ReplicateTo.TWO; case 3: return ReplicateTo.THREE; default: throw new DBException(REPLICATE_PROPERTY + " must be between 0 and 3"); } } /** * Parse the persist property into the correct enum. * * @param property the stringified property value. * @throws DBException if parsing the property did fail. 
* @return the correct enum. */ private PersistTo parsePersistTo(final String property) throws DBException { int value = Integer.parseInt(property); switch (value) { case 0: return PersistTo.ZERO; case 1: return PersistTo.ONE; case 2: return PersistTo.TWO; case 3: return PersistTo.THREE; case 4: return PersistTo.FOUR; default: throw new DBException(PERSIST_PROPERTY + " must be between 0 and 4"); } } /** * Shutdown the client. */ @Override public void cleanup() { client.shutdown(); } @Override public Status read(final String table, final String key, final Set fields, - final HashMap result) { + final Map result) { String formattedKey = formatKey(table, key); try { Object loaded = client.get(formattedKey); if (loaded == null) { return Status.ERROR; } decode(loaded, fields, result); return Status.OK; } catch (Exception e) { if (log.isErrorEnabled()) { log.error("Could not read value for key " + formattedKey, e); } return Status.ERROR; } } @Override public Status scan(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { try { Query query = new Query().setRangeStart(startkey) .setLimit(recordcount) .setIncludeDocs(true) .setStale(stale); ViewResponse response = client.query(view, query); for (ViewRow row : response) { HashMap rowMap = new HashMap(); decode(row.getDocument(), fields, rowMap); result.add(rowMap); } return Status.OK; } catch (Exception e) { log.error(e.getMessage()); } return Status.ERROR; } @Override - public Status update(final String table, final String key, final HashMap values) { + public Status update(final String table, final String key, final Map values) { String formattedKey = formatKey(table, key); try { final OperationFuture future = client.replace(formattedKey, encode(values), persistTo, replicateTo); return checkFutureStatus(future); } catch (Exception e) { if (log.isErrorEnabled()) { log.error("Could not update value for key " + formattedKey, e); } return Status.ERROR; } } @Override - public Status insert(final String table, final String key, final HashMap values) { + public Status insert(final String table, final String key, final Map values) { String formattedKey = formatKey(table, key); try { final OperationFuture future = client.add(formattedKey, encode(values), persistTo, replicateTo); return checkFutureStatus(future); } catch (Exception e) { if (log.isErrorEnabled()) { log.error("Could not insert value for key " + formattedKey, e); } return Status.ERROR; } } @Override public Status delete(final String table, final String key) { String formattedKey = formatKey(table, key); try { final OperationFuture future = client.delete(formattedKey, persistTo, replicateTo); return checkFutureStatus(future); } catch (Exception e) { if (log.isErrorEnabled()) { log.error("Could not delete value for key " + formattedKey, e); } return Status.ERROR; } } /** * Prefix the key with the given prefix, to establish a unique namespace. * * @param prefix the prefix to use. * @param key the actual key. * @return the formatted and prefixed key. */ private String formatKey(final String prefix, final String key) { return prefix + ":" + key; } /** * Wrapper method that either inspects the future or not. * * @param future the future to potentially verify. * @return the status of the future result. */ private Status checkFutureStatus(final OperationFuture future) { if (checkFutures) { return future.getStatus().isSuccess() ? Status.OK : Status.ERROR; } else { return Status.OK; } } /** * Decode the object from server into the storable result. 
* * @param source the loaded object. * @param fields the fields to check. * @param dest the result passed back to the ycsb core. */ - private void decode(final Object source, final Set fields, final HashMap dest) { + private void decode(final Object source, final Set fields, final Map dest) { if (useJson) { try { JsonNode json = JSON_MAPPER.readTree((String) source); boolean checkFields = fields != null && !fields.isEmpty(); for (Iterator> jsonFields = json.fields(); jsonFields.hasNext();) { Map.Entry jsonField = jsonFields.next(); String name = jsonField.getKey(); if (checkFields && fields.contains(name)) { continue; } JsonNode jsonValue = jsonField.getValue(); if (jsonValue != null && !jsonValue.isNull()) { dest.put(name, new StringByteIterator(jsonValue.asText())); } } } catch (Exception e) { throw new RuntimeException("Could not decode JSON"); } } else { - HashMap converted = (HashMap) source; + Map converted = (HashMap) source; for (Map.Entry entry : converted.entrySet()) { dest.put(entry.getKey(), new StringByteIterator(entry.getValue())); } } } /** * Encode the object for couchbase storage. * * @param source the source value. * @return the storable object. */ - private Object encode(final HashMap source) { - HashMap stringMap = StringByteIterator.getStringMap(source); + private Object encode(final Map source) { + Map stringMap = StringByteIterator.getStringMap(source); if (!useJson) { return stringMap; } ObjectNode node = JSON_MAPPER.createObjectNode(); for (Map.Entry pair : stringMap.entrySet()) { node.put(pair.getKey(), pair.getValue()); } JsonFactory jsonFactory = new JsonFactory(); Writer writer = new StringWriter(); try { JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer); JSON_MAPPER.writeTree(jsonGenerator, node); } catch (Exception e) { throw new RuntimeException("Could not encode JSON value"); } return writer.toString(); } } diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java index 6697a0c3..251aaf11 100644 --- a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java +++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java @@ -1,940 +1,940 @@ /** * Copyright (c) 2016 Yahoo! Inc. All rights reserved. *
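The encode()/decode() pair above is the JSON bridge: ByteIterator values are flattened to Strings via StringByteIterator.getStringMap (which now returns Map rather than HashMap after this patch) and then serialized with Jackson. A condensed sketch of the encode path follows, using ObjectMapper.writeValueAsString in place of the original JsonGenerator plumbing; the field name and value are illustrative.

import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.StringByteIterator;

// Sketch of CouchbaseClient.encode(): ByteIterator values -> String map -> JSON.
public final class EncodeSketch {
  private static final ObjectMapper JSON_MAPPER = new ObjectMapper();

  static String encode(Map<String, ByteIterator> source) throws Exception {
    Map<String, String> stringMap = StringByteIterator.getStringMap(source);
    ObjectNode node = JSON_MAPPER.createObjectNode();
    for (Map.Entry<String, String> pair : stringMap.entrySet()) {
      node.put(pair.getKey(), pair.getValue());
    }
    return JSON_MAPPER.writeValueAsString(node);
  }

  public static void main(String[] args) throws Exception {
    Map<String, ByteIterator> values = new HashMap<>();
    values.put("field0", new StringByteIterator("value0"));
    System.out.println(encode(values)); // {"field0":"value0"}
  }
}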

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.couchbase2; import com.couchbase.client.core.env.DefaultCoreEnvironment; import com.couchbase.client.core.env.resources.IoPoolShutdownHook; import com.couchbase.client.core.logging.CouchbaseLogger; import com.couchbase.client.core.logging.CouchbaseLoggerFactory; import com.couchbase.client.core.metrics.DefaultLatencyMetricsCollectorConfig; import com.couchbase.client.core.metrics.DefaultMetricsCollectorConfig; import com.couchbase.client.core.metrics.LatencyMetricsCollectorConfig; import com.couchbase.client.core.metrics.MetricsCollectorConfig; import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonFactory; import com.couchbase.client.deps.com.fasterxml.jackson.core.JsonGenerator; import com.couchbase.client.deps.com.fasterxml.jackson.databind.JsonNode; import com.couchbase.client.deps.com.fasterxml.jackson.databind.node.ObjectNode; import com.couchbase.client.deps.io.netty.channel.DefaultSelectStrategyFactory; import com.couchbase.client.deps.io.netty.channel.EventLoopGroup; import com.couchbase.client.deps.io.netty.channel.SelectStrategy; import com.couchbase.client.deps.io.netty.channel.SelectStrategyFactory; import com.couchbase.client.deps.io.netty.channel.epoll.EpollEventLoopGroup; import com.couchbase.client.deps.io.netty.channel.nio.NioEventLoopGroup; import com.couchbase.client.deps.io.netty.util.IntSupplier; import com.couchbase.client.deps.io.netty.util.concurrent.DefaultThreadFactory; import com.couchbase.client.java.Bucket; import com.couchbase.client.java.Cluster; import com.couchbase.client.java.CouchbaseCluster; import com.couchbase.client.java.PersistTo; import com.couchbase.client.java.ReplicateTo; import com.couchbase.client.java.document.Document; import com.couchbase.client.java.document.RawJsonDocument; import com.couchbase.client.java.document.json.JsonArray; import com.couchbase.client.java.document.json.JsonObject; import com.couchbase.client.java.env.CouchbaseEnvironment; import com.couchbase.client.java.env.DefaultCouchbaseEnvironment; import com.couchbase.client.java.error.TemporaryFailureException; import com.couchbase.client.java.query.*; import com.couchbase.client.java.transcoder.JacksonTransformers; import com.couchbase.client.java.util.Blocking; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import rx.Observable; import rx.Subscriber; import rx.functions.Action1; import rx.functions.Func1; import java.io.StringWriter; import java.io.Writer; import java.nio.channels.spi.SelectorProvider; import java.util.*; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; /** * A class that wraps the 2.x Couchbase SDK to be used with YCSB. * *

* <p>The following options can be passed when using this database client to override the defaults.
*
* <ul>
* <li>couchbase.host=127.0.0.1 The hostname from one server.</li>
* <li>couchbase.bucket=default The bucket name to use.</li>
* <li>couchbase.password= The password of the bucket.</li>
* <li>couchbase.syncMutationResponse=true If mutations should wait for the response to complete.</li>
* <li>couchbase.persistTo=0 Persistence durability requirement.</li>
* <li>couchbase.replicateTo=0 Replication durability requirement.</li>
* <li>couchbase.upsert=false Use upsert instead of insert or replace.</li>
* <li>couchbase.adhoc=false If set to true, prepared statements are not used.</li>
* <li>couchbase.kv=true If set to false, mutation operations will also be performed through N1QL.</li>
* <li>couchbase.maxParallelism=1 The server parallelism for all N1QL queries.</li>
* <li>couchbase.kvEndpoints=1 The number of KV sockets to open per server.</li>
* <li>couchbase.queryEndpoints=5 The number of N1QL query sockets to open per server.</li>
* <li>couchbase.epoll=false If Epoll instead of NIO should be used (only available on Linux).</li>
* <li>couchbase.boost=3 If &gt; 0 trades CPU for higher throughput. N is the number of event loops, ideally set to the number of physical cores. Setting it higher than that will likely degrade performance.</li>
* <li>couchbase.networkMetricsInterval=0 The interval in seconds when latency metrics will be logged.</li>
* <li>couchbase.runtimeMetricsInterval=0 The interval in seconds when runtime metrics will be logged.</li>
* <li>couchbase.documentExpiry=0 Document Expiry is the amount of time until a document expires in Couchbase.</li>
* </ul>
*/ public class Couchbase2Client extends DB { static { // No need to send the full encoded_plan for this benchmark workload, less network overhead! System.setProperty("com.couchbase.query.encodedPlanEnabled", "false"); } private static final String SEPARATOR = ":"; private static final CouchbaseLogger LOGGER = CouchbaseLoggerFactory.getInstance(Couchbase2Client.class); private static final Object INIT_COORDINATOR = new Object(); private static volatile CouchbaseEnvironment env = null; private Cluster cluster; private Bucket bucket; private String bucketName; private boolean upsert; private PersistTo persistTo; private ReplicateTo replicateTo; private boolean syncMutResponse; private boolean epoll; private long kvTimeout; private boolean adhoc; private boolean kv; private int maxParallelism; private String host; private int kvEndpoints; private int queryEndpoints; private int boost; private int networkMetricsInterval; private int runtimeMetricsInterval; private String scanAllQuery; private int documentExpiry; @Override public void init() throws DBException { Properties props = getProperties(); host = props.getProperty("couchbase.host", "127.0.0.1"); bucketName = props.getProperty("couchbase.bucket", "default"); String bucketPassword = props.getProperty("couchbase.password", ""); upsert = props.getProperty("couchbase.upsert", "false").equals("true"); persistTo = parsePersistTo(props.getProperty("couchbase.persistTo", "0")); replicateTo = parseReplicateTo(props.getProperty("couchbase.replicateTo", "0")); syncMutResponse = props.getProperty("couchbase.syncMutationResponse", "true").equals("true"); adhoc = props.getProperty("couchbase.adhoc", "false").equals("true"); kv = props.getProperty("couchbase.kv", "true").equals("true"); maxParallelism = Integer.parseInt(props.getProperty("couchbase.maxParallelism", "1")); kvEndpoints = Integer.parseInt(props.getProperty("couchbase.kvEndpoints", "1")); queryEndpoints = Integer.parseInt(props.getProperty("couchbase.queryEndpoints", "1")); epoll = props.getProperty("couchbase.epoll", "false").equals("true"); boost = Integer.parseInt(props.getProperty("couchbase.boost", "3")); networkMetricsInterval = Integer.parseInt(props.getProperty("couchbase.networkMetricsInterval", "0")); runtimeMetricsInterval = Integer.parseInt(props.getProperty("couchbase.runtimeMetricsInterval", "0")); documentExpiry = Integer.parseInt(props.getProperty("couchbase.documentExpiry", "0")); scanAllQuery = "SELECT RAW meta().id FROM `" + bucketName + "` WHERE meta().id >= '$1' ORDER BY meta().id LIMIT $2"; try { synchronized (INIT_COORDINATOR) { if (env == null) { LatencyMetricsCollectorConfig latencyConfig = networkMetricsInterval <= 0 ? DefaultLatencyMetricsCollectorConfig.disabled() : DefaultLatencyMetricsCollectorConfig .builder() .emitFrequency(networkMetricsInterval) .emitFrequencyUnit(TimeUnit.SECONDS) .build(); MetricsCollectorConfig runtimeConfig = runtimeMetricsInterval <= 0 ? 
DefaultMetricsCollectorConfig.disabled() : DefaultMetricsCollectorConfig.create(runtimeMetricsInterval, TimeUnit.SECONDS); DefaultCouchbaseEnvironment.Builder builder = DefaultCouchbaseEnvironment .builder() .queryEndpoints(queryEndpoints) .callbacksOnIoPool(true) .runtimeMetricsCollectorConfig(runtimeConfig) .networkLatencyMetricsCollectorConfig(latencyConfig) .socketConnectTimeout(10000) // 10 secs socket connect timeout .connectTimeout(30000) // 30 secs overall bucket open timeout .kvTimeout(10000) // 10 instead of 2.5s for KV ops .kvEndpoints(kvEndpoints); // Tune boosting and epoll based on settings SelectStrategyFactory factory = boost > 0 ? new BackoffSelectStrategyFactory() : DefaultSelectStrategyFactory.INSTANCE; int poolSize = boost > 0 ? boost : Integer.parseInt( System.getProperty("com.couchbase.ioPoolSize", Integer.toString(DefaultCoreEnvironment.IO_POOL_SIZE)) ); ThreadFactory threadFactory = new DefaultThreadFactory("cb-io", true); EventLoopGroup group = epoll ? new EpollEventLoopGroup(poolSize, threadFactory, factory) : new NioEventLoopGroup(poolSize, threadFactory, SelectorProvider.provider(), factory); builder.ioPool(group, new IoPoolShutdownHook(group)); env = builder.build(); logParams(); } } cluster = CouchbaseCluster.create(env, host); bucket = cluster.openBucket(bucketName, bucketPassword); kvTimeout = env.kvTimeout(); } catch (Exception ex) { throw new DBException("Could not connect to Couchbase Bucket.", ex); } if (!kv && !syncMutResponse) { throw new DBException("Not waiting for N1QL responses on mutations not yet implemented."); } } /** * Helper method to log the CLI params so that on the command line debugging is easier. */ private void logParams() { StringBuilder sb = new StringBuilder(); sb.append("host=").append(host); sb.append(", bucket=").append(bucketName); sb.append(", upsert=").append(upsert); sb.append(", persistTo=").append(persistTo); sb.append(", replicateTo=").append(replicateTo); sb.append(", syncMutResponse=").append(syncMutResponse); sb.append(", adhoc=").append(adhoc); sb.append(", kv=").append(kv); sb.append(", maxParallelism=").append(maxParallelism); sb.append(", queryEndpoints=").append(queryEndpoints); sb.append(", kvEndpoints=").append(kvEndpoints); sb.append(", queryEndpoints=").append(queryEndpoints); sb.append(", epoll=").append(epoll); sb.append(", boost=").append(boost); sb.append(", networkMetricsInterval=").append(networkMetricsInterval); sb.append(", runtimeMetricsInterval=").append(runtimeMetricsInterval); LOGGER.info("===> Using Params: " + sb.toString()); } @Override public Status read(final String table, final String key, Set fields, - final HashMap result) { + final Map result) { try { String docId = formatId(table, key); if (kv) { return readKv(docId, fields, result); } else { return readN1ql(docId, fields, result); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** - * Performs the {@link #read(String, String, Set, HashMap)} operation via Key/Value ("get"). + * Performs the {@link #read(String, String, Set, Map)} operation via Key/Value ("get"). * * @param docId the document ID * @param fields the fields to be loaded * @param result the result map where the doc needs to be converted into * @return The result of the operation. 
*/ - private Status readKv(final String docId, final Set fields, final HashMap result) + private Status readKv(final String docId, final Set fields, final Map result) throws Exception { RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class); if (loaded == null) { return Status.NOT_FOUND; } decode(loaded.content(), fields, result); return Status.OK; } /** - * Performs the {@link #read(String, String, Set, HashMap)} operation via N1QL ("SELECT"). + * Performs the {@link #read(String, String, Set, Map)} operation via N1QL ("SELECT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param fields the fields to be loaded * @param result the result map where the doc needs to be converted into * @return The result of the operation. */ - private Status readN1ql(final String docId, Set fields, final HashMap result) + private Status readN1ql(final String docId, Set fields, final Map result) throws Exception { String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( readQuery, JsonArray.from(docId), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + readQuery + ", Errors: " + queryResult.errors()); } N1qlQueryRow row; try { row = queryResult.rows().next(); } catch (NoSuchElementException ex) { return Status.NOT_FOUND; } JsonObject content = row.value(); if (fields == null) { content = content.getObject(bucketName); // n1ql result set scoped under *.bucketName fields = content.getNames(); } for (String field : fields) { Object value = content.get(field); result.put(field, new StringByteIterator(value != null ? value.toString() : "")); } return Status.OK; } @Override - public Status update(final String table, final String key, final HashMap values) { + public Status update(final String table, final String key, final Map values) { if (upsert) { return upsert(table, key, values); } try { String docId = formatId(table, key); if (kv) { return updateKv(docId, values); } else { return updateN1ql(docId, values); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** - * Performs the {@link #update(String, String, HashMap)} operation via Key/Value ("replace"). + * Performs the {@link #update(String, String, Map)} operation via Key/Value ("replace"). * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ - private Status updateKv(final String docId, final HashMap values) { + private Status updateKv(final String docId, final Map values) { waitForMutationResponse(bucket.async().replace( RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, replicateTo )); return Status.OK; } /** - * Performs the {@link #update(String, String, HashMap)} operation via N1QL ("UPDATE"). + * Performs the {@link #update(String, String, Map)} operation via N1QL ("UPDATE"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. 
*/ - private Status updateN1ql(final String docId, final HashMap values) + private Status updateN1ql(final String docId, final Map values) throws Exception { String fields = encodeN1qlFields(values); String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( updateQuery, JsonArray.from(docId), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + updateQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } @Override - public Status insert(final String table, final String key, final HashMap values) { + public Status insert(final String table, final String key, final Map values) { if (upsert) { return upsert(table, key, values); } try { String docId = formatId(table, key); if (kv) { return insertKv(docId, values); } else { return insertN1ql(docId, values); } } catch (Exception ex) { ex.printStackTrace(); return Status.ERROR; } } /** - * Performs the {@link #insert(String, String, HashMap)} operation via Key/Value ("INSERT"). + * Performs the {@link #insert(String, String, Map)} operation via Key/Value ("INSERT"). * * Note that during the "load" phase it makes sense to retry TMPFAILS (so that even if the server is * overloaded temporarily the ops will succeed eventually). The current code will retry TMPFAILs * for maximum of one minute and then bubble up the error. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ - private Status insertKv(final String docId, final HashMap values) { + private Status insertKv(final String docId, final Map values) { int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate. for(int i = 0; i < tries; i++) { try { waitForMutationResponse(bucket.async().insert( RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, replicateTo )); return Status.OK; } catch (TemporaryFailureException ex) { try { Thread.sleep(1000); } catch (InterruptedException e) { throw new RuntimeException("Interrupted while sleeping on TMPFAIL backoff.", ex); } } } throw new RuntimeException("Still receiving TMPFAIL from the server after trying " + tries + " times. " + "Check your server."); } /** - * Performs the {@link #insert(String, String, HashMap)} operation via N1QL ("INSERT"). + * Performs the {@link #insert(String, String, Map)} operation via N1QL ("INSERT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * * @param docId the document ID * @param values the values to update the document with. * @return The result of the operation. */ - private Status insertN1ql(final String docId, final HashMap values) + private Status insertN1ql(final String docId, final Map values) throws Exception { String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( insertQuery, JsonArray.from(docId, valuesToJsonObject(values)), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new DBException("Error while parsing N1QL Result. Query: " + insertQuery + ", Errors: " + queryResult.errors()); } return Status.OK; } /** * Performs an upsert instead of insert or update using either Key/Value or N1QL. 
 *
 * If this option should be used, the "-p couchbase.upsert=true" property must be set.
 *
 * @param table The name of the table
 * @param key The record key of the record to insert.
 * @param values A HashMap of field/value pairs to insert in the record
 * @return The result of the operation.
 */
-  private Status upsert(final String table, final String key, final HashMap<String, ByteIterator> values) {
+  private Status upsert(final String table, final String key, final Map<String, ByteIterator> values) {
    try {
      String docId = formatId(table, key);
      if (kv) {
        return upsertKv(docId, values);
      } else {
        return upsertN1ql(docId, values);
      }
    } catch (Exception ex) {
      ex.printStackTrace();
      return Status.ERROR;
    }
  }

  /**
-   * Performs the {@link #upsert(String, String, HashMap)} operation via Key/Value ("upsert").
+   * Performs the {@link #upsert(String, String, Map)} operation via Key/Value ("upsert").
   *
   * If this option should be used, the "-p couchbase.upsert=true" property must be set.
   *
   * @param docId the document ID
   * @param values the values to update the document with.
   * @return The result of the operation.
   */
-  private Status upsertKv(final String docId, final HashMap<String, ByteIterator> values) {
+  private Status upsertKv(final String docId, final Map<String, ByteIterator> values) {
    waitForMutationResponse(bucket.async().upsert(
        RawJsonDocument.create(docId, documentExpiry, encode(values)),
        persistTo,
        replicateTo
    ));
    return Status.OK;
  }

  /**
-   * Performs the {@link #upsert(String, String, HashMap)} operation via N1QL ("UPSERT").
+   * Performs the {@link #upsert(String, String, Map)} operation via N1QL ("UPSERT").
   *
   * If this option should be used, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set.
   *
   * @param docId the document ID
   * @param values the values to update the document with.
   * @return The result of the operation.
   */
-  private Status upsertN1ql(final String docId, final HashMap<String, ByteIterator> values)
+  private Status upsertN1ql(final String docId, final Map<String, ByteIterator> values)
    throws Exception {
    String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)";
    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
        upsertQuery,
        JsonArray.from(docId, valuesToJsonObject(values)),
        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
    ));

    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
      throw new DBException("Error while parsing N1QL Result. Query: " + upsertQuery
          + ", Errors: " + queryResult.errors());
    }
    return Status.OK;
  }

  @Override
  public Status delete(final String table, final String key) {
    try {
      String docId = formatId(table, key);
      if (kv) {
        return deleteKv(docId);
      } else {
        return deleteN1ql(docId);
      }
    } catch (Exception ex) {
      ex.printStackTrace();
      return Status.ERROR;
    }
  }

  /**
   * Performs the {@link #delete(String, String)} operation via Key/Value ("remove").
   *
   * @param docId the document ID.
   * @return The result of the operation.
   */
  private Status deleteKv(final String docId) {
    waitForMutationResponse(bucket.async().remove(
        docId,
        persistTo,
        replicateTo
    ));
    return Status.OK;
  }

  /**
   * Performs the {@link #delete(String, String)} operation via N1QL ("DELETE").
   *
   * If this option should be used, the "-p couchbase.kv=false" property must be set.
   *
   * @param docId the document ID.
   * @return The result of the operation.
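 *
 * <p>The generated statement has the form
 * <pre>{@code
 * DELETE FROM `bucketName` USE KEYS [$1]
 * }</pre>
 * with the document ID bound to the positional parameter {@code $1}.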
 */
  private Status deleteN1ql(final String docId) throws Exception {
    String deleteQuery = "DELETE FROM `" + bucketName + "` USE KEYS [$1]";
    N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized(
        deleteQuery,
        JsonArray.from(docId),
        N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
    ));

    if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) {
      throw new DBException("Error while parsing N1QL Result. Query: " + deleteQuery
          + ", Errors: " + queryResult.errors());
    }
    return Status.OK;
  }

  @Override
  public Status scan(final String table, final String startkey, final int recordcount,
      final Set<String> fields, final Vector<HashMap<String, ByteIterator>> result) {
    try {
      if (fields == null || fields.isEmpty()) {
        return scanAllFields(table, startkey, recordcount, result);
      } else {
        return scanSpecificFields(table, startkey, recordcount, fields, result);
      }
    } catch (Exception ex) {
      ex.printStackTrace();
      return Status.ERROR;
    }
  }

  /**
   * Performs the {@link #scan(String, String, int, Set, Vector)} operation, optimized for all fields.
   *
   * Since the full document bodies need to be loaded anyway, it makes sense to just grab the document IDs
   * from N1QL and then perform the bulk loading via KV for better performance. This is a usual pattern with
   * Couchbase and shows the benefits of using both N1QL and KV together.
   *
   * @param table The name of the table
   * @param startkey The record key of the first record to read.
   * @param recordcount The number of records to read
   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
   * @return The result of the operation.
   */
  private Status scanAllFields(final String table, final String startkey, final int recordcount,
      final Vector<HashMap<String, ByteIterator>> result) {
    final List<HashMap<String, ByteIterator>> data = new ArrayList<HashMap<String, ByteIterator>>(recordcount);
    bucket.async()
        .query(N1qlQuery.parameterized(
            scanAllQuery,
            JsonArray.from(formatId(table, startkey), recordcount),
            N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism)
        ))
        .doOnNext(new Action1<AsyncN1qlQueryResult>() {
          @Override
          public void call(AsyncN1qlQueryResult result) {
            if (!result.parseSuccess()) {
              throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanAllQuery
                  + ", Errors: " + result.errors());
            }
          }
        })
        .flatMap(new Func1<AsyncN1qlQueryResult, Observable<AsyncN1qlQueryRow>>() {
          @Override
          public Observable<AsyncN1qlQueryRow> call(AsyncN1qlQueryResult result) {
            return result.rows();
          }
        })
        .flatMap(new Func1<AsyncN1qlQueryRow, Observable<RawJsonDocument>>() {
          @Override
          public Observable<RawJsonDocument> call(AsyncN1qlQueryRow row) {
            // The row value is the document ID as a JSON string, so strip the surrounding quotes.
            String id = new String(row.byteValue()).trim();
            return bucket.async().get(id.substring(1, id.length() - 1), RawJsonDocument.class);
          }
        })
        .map(new Func1<RawJsonDocument, HashMap<String, ByteIterator>>() {
          @Override
          public HashMap<String, ByteIterator> call(RawJsonDocument document) {
            HashMap<String, ByteIterator> tuple = new HashMap<String, ByteIterator>();
            decode(document.content(), null, tuple);
            return tuple;
          }
        })
        .toBlocking()
        .forEach(new Action1<HashMap<String, ByteIterator>>() {
          @Override
          public void call(HashMap<String, ByteIterator> tuple) {
            data.add(tuple);
          }
        });

    result.addAll(data);
    return Status.OK;
  }

  /**
   * Performs the {@link #scan(String, String, int, Set, Vector)} operation via N1QL, only for a subset of the fields.
   *
   * @param table The name of the table
   * @param startkey The record key of the first record to read.
   * @param recordcount The number of records to read
   * @param fields The list of fields to read, or null for all of them
   * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record
   * @return The result of the operation.
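 *
 * <p>For illustration, with fields {field0, field1} the generated statement has the form
 * <pre>{@code
 * SELECT `field0`,`field1` FROM `bucketName` WHERE meta().id >= '$1' LIMIT $2
 * }</pre>
 * where the formatted start key and the record count are bound as positional parameters.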
*/ private Status scanSpecificFields(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { String scanSpecQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` WHERE meta().id >= '$1' LIMIT $2"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( scanSpecQuery, JsonArray.from(formatId(table, startkey), recordcount), N1qlParams.build().adhoc(adhoc).maxParallelism(maxParallelism) )); if (!queryResult.parseSuccess() || !queryResult.finalSuccess()) { throw new RuntimeException("Error while parsing N1QL Result. Query: " + scanSpecQuery + ", Errors: " + queryResult.errors()); } boolean allFields = fields == null || fields.isEmpty(); result.ensureCapacity(recordcount); for (N1qlQueryRow row : queryResult) { JsonObject value = row.value(); if (fields == null) { value = value.getObject(bucketName); } Set f = allFields ? value.getNames() : fields; HashMap tuple = new HashMap(f.size()); for (String field : f) { tuple.put(field, new StringByteIterator(value.getString(field))); } result.add(tuple); } return Status.OK; } /** * Helper method to block on the response, depending on the property set. * * By default, since YCSB is sync the code will always wait for the operation to complete. In some * cases it can be useful to just "drive load" and disable the waiting. Note that when the * "-p couchbase.syncMutationResponse=false" option is used, the measured results by YCSB can basically * be thrown away. Still helpful sometimes during load phases to speed them up :) * * @param input the async input observable. */ private void waitForMutationResponse(final Observable> input) { if (!syncMutResponse) { ((Observable>)input).subscribe(new Subscriber>() { @Override public void onCompleted() { } @Override public void onError(Throwable e) { } @Override public void onNext(Document document) { } }); } else { Blocking.blockForSingle(input, kvTimeout, TimeUnit.MILLISECONDS); } } /** - * Helper method to turn the values into a String, used with {@link #upsertN1ql(String, HashMap)}. + * Helper method to turn the values into a String, used with {@link #upsertN1ql(String, Map)}. * * @param values the values to encode. * @return the encoded string. */ - private static String encodeN1qlFields(final HashMap values) { + private static String encodeN1qlFields(final Map values) { if (values.isEmpty()) { return ""; } StringBuilder sb = new StringBuilder(); for (Map.Entry entry : values.entrySet()) { String raw = entry.getValue().toString(); String escaped = raw.replace("\"", "\\\"").replace("\'", "\\\'"); sb.append(entry.getKey()).append("=\"").append(escaped).append("\" "); } String toReturn = sb.toString(); return toReturn.substring(0, toReturn.length() - 1); } /** * Helper method to turn the map of values into a {@link JsonObject} for further use. * * @param values the values to transform. * @return the created json object. */ - private static JsonObject valuesToJsonObject(final HashMap values) { + private static JsonObject valuesToJsonObject(final Map values) { JsonObject result = JsonObject.create(); for (Map.Entry entry : values.entrySet()) { result.put(entry.getKey(), entry.getValue().toString()); } return result; } /** * Helper method to join the set of fields into a String suitable for N1QL. * * @param fields the fields to join. * @return the joined fields as a String. 
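 *
 * <p>For example, the field set {field0, field1} yields {@code `field0`,`field1`},
 * while a null or empty set yields {@code *}.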
 */
  private static String joinFields(final Set<String> fields) {
    if (fields == null || fields.isEmpty()) {
      return "*";
    }
    StringBuilder builder = new StringBuilder();
    for (String f : fields) {
      builder.append("`").append(f).append("`").append(",");
    }
    String toReturn = builder.toString();
    return toReturn.substring(0, toReturn.length() - 1);
  }

  /**
   * Helper method to turn the prefix and key into a proper document ID.
   *
   * @param prefix the prefix (table).
   * @param key the key itself.
   * @return a document ID that can be used with Couchbase.
   */
  private static String formatId(final String prefix, final String key) {
    return prefix + SEPARATOR + key;
  }

  /**
   * Helper method to parse the "ReplicateTo" property on startup.
   *
   * @param property the property to parse.
   * @return the parsed setting.
   */
  private static ReplicateTo parseReplicateTo(final String property) throws DBException {
    int value = Integer.parseInt(property);
    switch (value) {
    case 0:
      return ReplicateTo.NONE;
    case 1:
      return ReplicateTo.ONE;
    case 2:
      return ReplicateTo.TWO;
    case 3:
      return ReplicateTo.THREE;
    default:
      throw new DBException("\"couchbase.replicateTo\" must be between 0 and 3");
    }
  }

  /**
   * Helper method to parse the "PersistTo" property on startup.
   *
   * @param property the property to parse.
   * @return the parsed setting.
   */
  private static PersistTo parsePersistTo(final String property) throws DBException {
    int value = Integer.parseInt(property);
    switch (value) {
    case 0:
      return PersistTo.NONE;
    case 1:
      return PersistTo.ONE;
    case 2:
      return PersistTo.TWO;
    case 3:
      return PersistTo.THREE;
    case 4:
      return PersistTo.FOUR;
    default:
      throw new DBException("\"couchbase.persistTo\" must be between 0 and 4");
    }
  }

  /**
   * Decode the String from server and pass it into the decoded destination.
   *
   * @param source the loaded object.
   * @param fields the fields to check.
   * @param dest the result passed back to YCSB.
   */
  private void decode(final String source, final Set<String> fields,
-      final HashMap<String, ByteIterator> dest) {
+      final Map<String, ByteIterator> dest) {
    try {
      JsonNode json = JacksonTransformers.MAPPER.readTree(source);
      boolean checkFields = fields != null && !fields.isEmpty();
      for (Iterator<Map.Entry<String, JsonNode>> jsonFields = json.fields(); jsonFields.hasNext();) {
        Map.Entry<String, JsonNode> jsonField = jsonFields.next();
        String name = jsonField.getKey();
        if (checkFields && !fields.contains(name)) {
          continue;
        }
        JsonNode jsonValue = jsonField.getValue();
        if (jsonValue != null && !jsonValue.isNull()) {
          dest.put(name, new StringByteIterator(jsonValue.asText()));
        }
      }
    } catch (Exception e) {
      throw new RuntimeException("Could not decode JSON", e);
    }
  }

  /**
   * Encode the source into a String for storage.
   *
   * @param source the source value.
   * @return the encoded string.
   */
-  private String encode(final HashMap<String, ByteIterator> source) {
-    HashMap<String, String> stringMap = StringByteIterator.getStringMap(source);
+  private String encode(final Map<String, ByteIterator> source) {
+    Map<String, String> stringMap = StringByteIterator.getStringMap(source);
    ObjectNode node = JacksonTransformers.MAPPER.createObjectNode();
    for (Map.Entry<String, String> pair : stringMap.entrySet()) {
      node.put(pair.getKey(), pair.getValue());
    }
    JsonFactory jsonFactory = new JsonFactory();
    Writer writer = new StringWriter();
    try {
      JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer);
      JacksonTransformers.MAPPER.writeTree(jsonGenerator, node);
    } catch (Exception e) {
      throw new RuntimeException("Could not encode JSON value", e);
    }
    return writer.toString();
  }
}

/**
 * Factory for the {@link BackoffSelectStrategy} to be used with boosting.
 */
class BackoffSelectStrategyFactory implements SelectStrategyFactory {
  @Override
  public SelectStrategy newSelectStrategy() {
    return new BackoffSelectStrategy();
  }
}

/**
 * Custom IO select strategy which trades CPU for throughput, used with the boost setting.
 */
class BackoffSelectStrategy implements SelectStrategy {

  private int counter = 0;

  @Override
  public int calculateStrategy(final IntSupplier supplier, final boolean hasTasks) throws Exception {
    int selectNowResult = supplier.get();
    if (hasTasks || selectNowResult != 0) {
      counter = 0;
      return selectNowResult;
    }
    counter++;

    // Escalate the backoff as the idle spin gets longer; the largest threshold
    // must be checked first, otherwise the later branches are unreachable.
    if (counter > 5000) {
      // defer to blocking select
      counter = 0;
      return SelectStrategy.SELECT;
    } else if (counter > 4000) {
      LockSupport.parkNanos(1000);
    } else if (counter > 3000) {
      Thread.yield();
    } else if (counter > 2000) {
      LockSupport.parkNanos(1);
    }

    return SelectStrategy.CONTINUE;
  }
}
diff --git a/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java b/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java
index b1f471f1..d861fd70 100644
--- a/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java
+++ b/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java
@@ -1,341 +1,340 @@
/*
 * Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * Copyright 2015-2016 YCSB Contributors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.yahoo.ycsb.db;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.PropertiesCredentials;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.dynamodbv2.model.*;
import com.yahoo.ycsb.*;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.Vector;

/**
 * DynamoDB v1.10.48 client for YCSB.
 */
public class DynamoDBClient extends DB {

  /**
   * Defines the primary key type used in this particular DB instance.
   *

* By default, the primary key type is "HASH". Optionally, the user can * choose to use hash_and_range key type. See documentation in the * DynamoDB.Properties file for more details. */ private enum PrimaryKeyType { HASH, HASH_AND_RANGE } private AmazonDynamoDBClient dynamoDB; private String primaryKeyName; private PrimaryKeyType primaryKeyType = PrimaryKeyType.HASH; // If the user choose to use HASH_AND_RANGE as primary key type, then // the following two variables become relevant. See documentation in the // DynamoDB.Properties file for more details. private String hashKeyValue; private String hashKeyName; private boolean consistentRead = false; private String endpoint = "http://dynamodb.us-east-1.amazonaws.com"; private int maxConnects = 50; private static final Logger LOGGER = Logger.getLogger(DynamoDBClient.class); private static final Status CLIENT_ERROR = new Status("CLIENT_ERROR", "An error occurred on the client."); private static final String DEFAULT_HASH_KEY_VALUE = "YCSB_0"; @Override public void init() throws DBException { String debug = getProperties().getProperty("dynamodb.debug", null); if (null != debug && "true".equalsIgnoreCase(debug)) { LOGGER.setLevel(Level.DEBUG); } String configuredEndpoint = getProperties().getProperty("dynamodb.endpoint", null); String credentialsFile = getProperties().getProperty("dynamodb.awsCredentialsFile", null); String primaryKey = getProperties().getProperty("dynamodb.primaryKey", null); String primaryKeyTypeString = getProperties().getProperty("dynamodb.primaryKeyType", null); String consistentReads = getProperties().getProperty("dynamodb.consistentReads", null); String connectMax = getProperties().getProperty("dynamodb.connectMax", null); if (null != connectMax) { this.maxConnects = Integer.parseInt(connectMax); } if (null != consistentReads && "true".equalsIgnoreCase(consistentReads)) { this.consistentRead = true; } if (null != configuredEndpoint) { this.endpoint = configuredEndpoint; } if (null == primaryKey || primaryKey.length() < 1) { throw new DBException("Missing primary key attribute name, cannot continue"); } if (null != primaryKeyTypeString) { try { this.primaryKeyType = PrimaryKeyType.valueOf(primaryKeyTypeString.trim().toUpperCase()); } catch (IllegalArgumentException e) { throw new DBException("Invalid primary key mode specified: " + primaryKeyTypeString + ". Expecting HASH or HASH_AND_RANGE."); } } if (this.primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) { // When the primary key type is HASH_AND_RANGE, keys used by YCSB // are range keys so we can benchmark performance of individual hash // partitions. In this case, the user must specify the hash key's name // and optionally can designate a value for the hash key. 
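      // For example (a hypothetical invocation; the hash key name is whatever
      // attribute your table actually defines):
      //   -p dynamodb.primaryKeyType=HASH_AND_RANGE
      //   -p dynamodb.hashKeyName=hashKey
      //   -p dynamodb.hashKeyValue=YCSB_0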
String configuredHashKeyName = getProperties().getProperty("dynamodb.hashKeyName", null);
      if (null == configuredHashKeyName || configuredHashKeyName.isEmpty()) {
        throw new DBException("Must specify a non-empty hash key name when the primary key type is HASH_AND_RANGE.");
      }
      this.hashKeyName = configuredHashKeyName;
      this.hashKeyValue = getProperties().getProperty("dynamodb.hashKeyValue", DEFAULT_HASH_KEY_VALUE);
    }

    try {
      AWSCredentials credentials = new PropertiesCredentials(new File(credentialsFile));
      ClientConfiguration cconfig = new ClientConfiguration();
      cconfig.setMaxConnections(maxConnects);
      dynamoDB = new AmazonDynamoDBClient(credentials, cconfig);
      dynamoDB.setEndpoint(this.endpoint);
      primaryKeyName = primaryKey;
      LOGGER.info("dynamodb connection created with " + this.endpoint);
    } catch (Exception e1) {
      LOGGER.error("DynamoDBClient.init(): Could not initialize DynamoDB client.", e1);
      throw new DBException("Could not initialize DynamoDB client.", e1);
    }
  }

  @Override
-  public Status read(String table, String key, Set<String> fields, HashMap<String, ByteIterator> result) {
+  public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("readkey: " + key + " from table: " + table);
    }

    GetItemRequest req = new GetItemRequest(table, createPrimaryKey(key));
    req.setAttributesToGet(fields);
    req.setConsistentRead(consistentRead);
    GetItemResult res;

    try {
      res = dynamoDB.getItem(req);
    } catch (AmazonServiceException ex) {
      LOGGER.error(ex);
      return Status.ERROR;
    } catch (AmazonClientException ex) {
      LOGGER.error(ex);
      return CLIENT_ERROR;
    }

    if (null != res.getItem()) {
      result.putAll(extractResult(res.getItem()));
      if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Result: " + res.toString());
      }
    }
    return Status.OK;
  }

  @Override
  public Status scan(String table, String startkey, int recordcount,
      Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("scan " + recordcount + " records from key: " + startkey + " on table: " + table);
    }

    /*
     * on DynamoDB's scan, startkey is *exclusive* so we need to
     * getItem(startKey) and then use scan for the rest
     */
    GetItemRequest greq = new GetItemRequest(table, createPrimaryKey(startkey));
    greq.setAttributesToGet(fields);

    GetItemResult gres;

    try {
      gres = dynamoDB.getItem(greq);
    } catch (AmazonServiceException ex) {
      LOGGER.error(ex);
      return Status.ERROR;
    } catch (AmazonClientException ex) {
      LOGGER.error(ex);
      return CLIENT_ERROR;
    }

    if (null != gres.getItem()) {
      result.add(extractResult(gres.getItem()));
    }

    int count = 1; // startKey is done, rest to go.
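    // DynamoDB scans are paginated: each response carries a LastEvaluatedKey,
    // which we feed back as the ExclusiveStartKey of the next request until
    // recordcount items have been collected.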
Map startKey = createPrimaryKey(startkey); ScanRequest req = new ScanRequest(table); req.setAttributesToGet(fields); while (count < recordcount) { req.setExclusiveStartKey(startKey); req.setLimit(recordcount - count); ScanResult res; try { res = dynamoDB.scan(req); } catch (AmazonServiceException ex) { LOGGER.error(ex); return Status.ERROR; } catch (AmazonClientException ex) { LOGGER.error(ex); return CLIENT_ERROR; } count += res.getCount(); for (Map items : res.getItems()) { result.add(extractResult(items)); } startKey = res.getLastEvaluatedKey(); } return Status.OK; } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("updatekey: " + key + " from table: " + table); } Map attributes = new HashMap<>(values.size()); for (Entry val : values.entrySet()) { AttributeValue v = new AttributeValue(val.getValue().toString()); attributes.put(val.getKey(), new AttributeValueUpdate().withValue(v).withAction("PUT")); } UpdateItemRequest req = new UpdateItemRequest(table, createPrimaryKey(key), attributes); try { dynamoDB.updateItem(req); } catch (AmazonServiceException ex) { LOGGER.error(ex); return Status.ERROR; } catch (AmazonClientException ex) { LOGGER.error(ex); return CLIENT_ERROR; } return Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("insertkey: " + primaryKeyName + "-" + key + " from table: " + table); } Map attributes = createAttributes(values); // adding primary key attributes.put(primaryKeyName, new AttributeValue(key)); if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) { // If the primary key type is HASH_AND_RANGE, then what has been put // into the attributes map above is the range key part of the primary // key, we still need to put in the hash key part here. 
attributes.put(hashKeyName, new AttributeValue(hashKeyValue)); } PutItemRequest putItemRequest = new PutItemRequest(table, attributes); try { dynamoDB.putItem(putItemRequest); } catch (AmazonServiceException ex) { LOGGER.error(ex); return Status.ERROR; } catch (AmazonClientException ex) { LOGGER.error(ex); return CLIENT_ERROR; } return Status.OK; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("deletekey: " + key + " from table: " + table); } DeleteItemRequest req = new DeleteItemRequest(table, createPrimaryKey(key)); try { dynamoDB.deleteItem(req); } catch (AmazonServiceException ex) { LOGGER.error(ex); return Status.ERROR; } catch (AmazonClientException ex) { LOGGER.error(ex); return CLIENT_ERROR; } return Status.OK; } - private static Map createAttributes(HashMap values) { - //leave space for the PrimaryKey + private static Map createAttributes(Map values) { Map attributes = new HashMap<>(values.size() + 1); for (Entry val : values.entrySet()) { attributes.put(val.getKey(), new AttributeValue(val.getValue().toString())); } return attributes; } private HashMap extractResult(Map item) { if (null == item) { return null; } HashMap rItems = new HashMap<>(item.size()); for (Entry attr : item.entrySet()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Result- key: %s, value: %s", attr.getKey(), attr.getValue())); } rItems.put(attr.getKey(), new StringByteIterator(attr.getValue().getS())); } return rItems; } private Map createPrimaryKey(String key) { Map k = new HashMap<>(); if (primaryKeyType == PrimaryKeyType.HASH) { k.put(primaryKeyName, new AttributeValue().withS(key)); } else if (primaryKeyType == PrimaryKeyType.HASH_AND_RANGE) { k.put(hashKeyName, new AttributeValue().withS(hashKeyValue)); k.put(primaryKeyName, new AttributeValue().withS(key)); } else { throw new RuntimeException("Assertion Error: impossible primary key type"); } return k; } } diff --git a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java index 76ddee1d..12c3feba 100644 --- a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java +++ b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java @@ -1,369 +1,370 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import static org.elasticsearch.common.settings.Settings.Builder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.node.NodeBuilder.nodeBuilder; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.node.Node; import org.elasticsearch.search.SearchHit; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * Elasticsearch client for YCSB framework. * *

 * Default properties to set:
 * <ul>
 *   <li>cluster.name = es.ycsb.cluster</li>
 *   <li>es.index.key = es.ycsb</li>
 *   <li>es.number_of_shards = 1</li>
 *   <li>es.number_of_replicas = 0</li>
 * </ul>
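 *
 * <p>Hypothetical command line overriding one of the defaults:
 * <pre>{@code
 * ./bin/ycsb run elasticsearch -P workloads/workloada -p es.index.key=es.ycsb
 * }</pre>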
*/ public class ElasticsearchClient extends DB { private static final String DEFAULT_CLUSTER_NAME = "es.ycsb.cluster"; private static final String DEFAULT_INDEX_KEY = "es.ycsb"; private static final String DEFAULT_REMOTE_HOST = "localhost:9300"; private static final int NUMBER_OF_SHARDS = 1; private static final int NUMBER_OF_REPLICAS = 0; private Node node; private Client client; private String indexKey; private Boolean remoteMode; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { final Properties props = getProperties(); // Check if transport client needs to be used (To connect to multiple // elasticsearch nodes) remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false")); final String pathHome = props.getProperty("path.home"); // when running in embedded mode, require path.home if (!remoteMode && (pathHome == null || pathHome.isEmpty())) { throw new IllegalArgumentException("path.home must be specified when running in embedded mode"); } this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY); int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS); int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS); Boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false")); Builder settings = Settings.settingsBuilder() .put("cluster.name", DEFAULT_CLUSTER_NAME) .put("node.local", Boolean.toString(!remoteMode)) .put("path.home", pathHome); // if properties file contains elasticsearch user defined properties // add it to the settings file (will overwrite the defaults). settings.put(props); final String clusterName = settings.get("cluster.name"); System.err.println("Elasticsearch starting node = " + clusterName); System.err.println("Elasticsearch node path.home = " + settings.get("path.home")); System.err.println("Elasticsearch Remote Mode = " + remoteMode); // Remote mode support for connecting to remote elasticsearch cluster if (remoteMode) { settings.put("client.transport.sniff", true) .put("client.transport.ignore_cluster_name", false) .put("client.transport.ping_timeout", "30s") .put("client.transport.nodes_sampler_interval", "30s"); // Default it to localhost:9300 String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(","); System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST)); TransportClient tClient = TransportClient.builder().settings(settings).build(); for (String h : nodeList) { String[] nodes = h.split(":"); try { tClient.addTransportAddress(new InetSocketTransportAddress( InetAddress.getByName(nodes[0]), Integer.parseInt(nodes[1]) )); } catch (NumberFormatException e) { throw new IllegalArgumentException("Unable to parse port number.", e); } catch (UnknownHostException e) { throw new IllegalArgumentException("Unable to Identify host.", e); } } client = tClient; } else { // Start node only if transport client mode is disabled node = nodeBuilder().clusterName(clusterName).settings(settings).node(); node.start(); client = node.client(); } final boolean exists = client.admin().indices() .exists(Requests.indicesExistsRequest(indexKey)).actionGet() .isExists(); if (exists && newdb) { client.admin().indices().prepareDelete(indexKey).execute().actionGet(); } if (!exists || newdb) { client.admin().indices().create( new CreateIndexRequest(indexKey) .settings( Settings.builder() 
.put("index.number_of_shards", numberOfShards) .put("index.number_of_replicas", numberOfReplicas) .put("index.mapping._id.indexed", true) )).actionGet(); } client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet(); } private int parseIntegerProperty(Properties properties, String key, int defaultValue) { String value = properties.getProperty(key); return value == null ? defaultValue : Integer.parseInt(value); } @Override public void cleanup() throws DBException { if (!remoteMode) { if (!node.isClosed()) { client.close(); node.close(); } } else { client.close(); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { final XContentBuilder doc = jsonBuilder().startObject(); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { doc.field(entry.getKey(), entry.getValue()); } doc.endObject(); client.prepareIndex(indexKey, table, key).setSource(doc).execute().actionGet(); return Status.OK; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { DeleteResponse response = client.prepareDelete(indexKey, table, key).execute().actionGet(); if (response.isFound()) { return Status.OK; } else { return Status.NOT_FOUND; } } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet(); if (response.isExists()) { if (fields != null) { for (String field : fields) { result.put(field, new StringByteIterator( (String) response.getSource().get(field))); } } else { for (String field : response.getSource().keySet()) { result.put(field, new StringByteIterator( (String) response.getSource().get(field))); } } return Status.OK; } else { return Status.NOT_FOUND; } } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. 
 *
 * @param table
 *          The name of the table
 * @param key
 *          The record key of the record to write.
 * @param values
 *          A HashMap of field/value pairs to update in the record
 * @return Zero on success, a non-zero error code on error. See this class's
 *         description for a discussion of error codes.
 */
@Override
-  public Status update(String table, String key, HashMap<String, ByteIterator> values) {
+  public Status update(String table, String key, Map<String, ByteIterator> values) {
  try {
    final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet();
    if (response.isExists()) {
      for (Entry<String, String> entry : StringByteIterator.getStringMap(values).entrySet()) {
        response.getSource().put(entry.getKey(), entry.getValue());
      }
      client.prepareIndex(indexKey, table, key).setSource(response.getSource()).execute().actionGet();
      return Status.OK;
    } else {
      return Status.NOT_FOUND;
    }
  } catch (Exception e) {
    e.printStackTrace();
    return Status.ERROR;
  }
}

/**
 * Perform a range scan for a set of records in the database. Each field/value
 * pair from the result will be stored in a HashMap.
 *
 * @param table
 *          The name of the table
 * @param startkey
 *          The record key of the first record to read.
 * @param recordcount
 *          The number of records to read
 * @param fields
 *          The list of fields to read, or null for all of them
 * @param result
 *          A Vector of HashMaps, where each HashMap is a set field/value
 *          pairs for one record
 * @return Zero on success, a non-zero error code on error. See this class's
 *         description for a discussion of error codes.
 */
@Override
public Status scan(String table, String startkey, int recordcount, Set<String> fields,
    Vector<HashMap<String, ByteIterator>> result) {
  try {
    final RangeQueryBuilder rangeQuery = rangeQuery("_id").gte(startkey);
    final SearchResponse response = client.prepareSearch(indexKey)
        .setTypes(table)
        .setQuery(rangeQuery)
        .setSize(recordcount)
        .execute()
        .actionGet();

    HashMap<String, ByteIterator> entry;
    for (SearchHit hit : response.getHits()) {
      // "fields" may be null, which per the contract above means all fields.
      final Set<String> fieldsToRead = fields != null ? fields : hit.getSource().keySet();
      entry = new HashMap<>(fieldsToRead.size());
      for (String field : fieldsToRead) {
        entry.put(field, new StringByteIterator((String) hit.getSource().get(field)));
      }
      result.add(entry);
    }
    return Status.OK;
  } catch (Exception e) {
    e.printStackTrace();
    return Status.ERROR;
  }
}
}
diff --git a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java
index 8aa2af76..aab0c99c 100644
--- a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java
+++ b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java
@@ -1,211 +1,211 @@
/**
 * Copyright (c) 2013 - 2016 YCSB Contributors. All rights reserved.
 *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import org.apache.geode.cache.*; import org.apache.geode.cache.client.ClientCache; import org.apache.geode.cache.client.ClientCacheFactory; import org.apache.geode.cache.client.ClientRegionFactory; import org.apache.geode.cache.client.ClientRegionShortcut; import org.apache.geode.internal.admin.remote.DistributionLocatorId; import org.apache.geode.internal.cache.GemFireCacheImpl; import org.apache.geode.pdx.JSONFormatter; import org.apache.geode.pdx.PdxInstance; import org.apache.geode.pdx.PdxInstanceFactory; import com.yahoo.ycsb.*; import java.util.*; /** * Apache Geode (incubating) client for the YCSB benchmark.
 *
 * <p>By default acts as a Geode client and tries to connect
 * to Geode cache server running on localhost with default
 * cache server port. Hostname and port of a Geode cacheServer
 * can be provided using <code>geode.serverport=port</code> and
 * <code>geode.serverhost=host</code> properties on YCSB command line.
 * A locator may also be used for discovering a cacheServer
 * by using the property <code>geode.locator=host[port]</code></p>
 *
 * <p>To run this client in a peer-to-peer topology with other Geode
 * nodes, use the property <code>geode.topology=p2p</code>. Running
 * in p2p mode will enable embedded caching in this client.</p>
 *
 * <p>YCSB by default does its operations against "usertable". When running
 * as a client this is a <code>ClientRegionShortcut.PROXY</code> region,
 * when running in p2p mode it is a <code>RegionShortcut.PARTITION</code>
 * region. A cache.xml defining "usertable" region can be placed in the
 * working directory to override these region definitions.</p>
*/ public class GeodeClient extends DB { /** * property name of the port where Geode server is listening for connections. */ private static final String SERVERPORT_PROPERTY_NAME = "geode.serverport"; /** * property name of the host where Geode server is running. */ private static final String SERVERHOST_PROPERTY_NAME = "geode.serverhost"; /** * default value of {@link #SERVERHOST_PROPERTY_NAME}. */ private static final String SERVERHOST_PROPERTY_DEFAULT = "localhost"; /** * property name to specify a Geode locator. This property can be used in both * client server and p2p topology */ private static final String LOCATOR_PROPERTY_NAME = "geode.locator"; /** * property name to specify Geode topology. */ private static final String TOPOLOGY_PROPERTY_NAME = "geode.topology"; /** * value of {@value #TOPOLOGY_PROPERTY_NAME} when peer to peer topology should be used. * (client-server topology is default) */ private static final String TOPOLOGY_P2P_VALUE = "p2p"; private GemFireCache cache; /** * true if ycsb client runs as a client to a Geode cache server. */ private boolean isClient; @Override public void init() throws DBException { Properties props = getProperties(); // hostName where Geode cacheServer is running String serverHost = null; // port of Geode cacheServer int serverPort = 0; String locatorStr = null; if (props != null && !props.isEmpty()) { String serverPortStr = props.getProperty(SERVERPORT_PROPERTY_NAME); if (serverPortStr != null) { serverPort = Integer.parseInt(serverPortStr); } serverHost = props.getProperty(SERVERHOST_PROPERTY_NAME, SERVERHOST_PROPERTY_DEFAULT); locatorStr = props.getProperty(LOCATOR_PROPERTY_NAME); String topology = props.getProperty(TOPOLOGY_PROPERTY_NAME); if (topology != null && topology.equals(TOPOLOGY_P2P_VALUE)) { CacheFactory cf = new CacheFactory(); if (locatorStr != null) { cf.set("locators", locatorStr); } cache = cf.create(); isClient = false; return; } } isClient = true; DistributionLocatorId locator = null; if (locatorStr != null) { locator = new DistributionLocatorId(locatorStr); } ClientCacheFactory ccf = new ClientCacheFactory(); ccf.setPdxReadSerialized(true); if (serverPort != 0) { ccf.addPoolServer(serverHost, serverPort); } else if (locator != null) { ccf.addPoolLocator(locator.getHost().getCanonicalHostName(), locator.getPort()); } cache = ccf.create(); } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Region r = getRegion(table); PdxInstance val = r.get(key); if (val != null) { if (fields == null) { for (String fieldName : val.getFieldNames()) { result.put(fieldName, new ByteArrayByteIterator((byte[]) val.getField(fieldName))); } } else { for (String field : fields) { result.put(field, new ByteArrayByteIterator((byte[]) val.getField(field))); } } return Status.OK; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // Geode does not support scan return Status.ERROR; } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override public Status delete(String table, String key) { getRegion(table).destroy(key); return Status.OK; } 
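  /**
   * Converts the YCSB value map into a Geode {@link PdxInstance} whose fields
   * hold the raw value bytes, so the server can store the record without
   * deserializing it.
   *
   * @param values the field/value pairs to convert.
   * @return the resulting PdxInstance.
   */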
private PdxInstance convertToBytearrayMap(Map values) { GemFireCacheImpl gci = (GemFireCacheImpl) CacheFactory.getAnyInstance(); PdxInstanceFactory pdxInstanceFactory = gci.createPdxInstanceFactory(JSONFormatter.JSON_CLASSNAME); for (Map.Entry entry : values.entrySet()) { pdxInstanceFactory.writeByteArray(entry.getKey(), entry.getValue().toArray()); } return pdxInstanceFactory.create(); } private Region getRegion(String table) { Region r = cache.getRegion(table); if (r == null) { try { if (isClient) { ClientRegionFactory crf = ((ClientCache) cache).createClientRegionFactory(ClientRegionShortcut.PROXY); r = crf.create(table); } else { RegionFactory rf = ((Cache) cache).createRegionFactory(RegionShortcut.PARTITION); r = rf.create(table); } } catch (RegionExistsException e) { // another thread created the region r = cache.getRegion(table); } } return r; } } diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java index c035032c..8e0c7b00 100644 --- a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java +++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java @@ -1,455 +1,456 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
 */

package com.yahoo.ycsb.db;

import java.io.IOException;
import java.nio.charset.Charset;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.Map;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.Set;
import java.util.Vector;
import java.util.concurrent.ExecutionException;

import com.google.bigtable.repackaged.com.google.protobuf.ByteString;
import com.google.bigtable.v2.Column;
import com.google.bigtable.v2.Family;
import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.Mutation;
import com.google.bigtable.v2.ReadRowsRequest;
import com.google.bigtable.v2.Row;
import com.google.bigtable.v2.RowFilter;
import com.google.bigtable.v2.RowRange;
import com.google.bigtable.v2.RowSet;
import com.google.bigtable.v2.Mutation.DeleteFromRow;
import com.google.bigtable.v2.Mutation.SetCell;
import com.google.bigtable.v2.RowFilter.Chain.Builder;
import com.google.cloud.bigtable.config.BigtableOptions;
import com.google.cloud.bigtable.grpc.BigtableDataClient;
import com.google.cloud.bigtable.grpc.BigtableSession;
import com.google.cloud.bigtable.grpc.BigtableTableName;
import com.google.cloud.bigtable.grpc.async.AsyncExecutor;
import com.google.cloud.bigtable.grpc.async.BulkMutation;
import com.google.cloud.bigtable.hbase.BigtableOptionsFactory;
import com.google.cloud.bigtable.util.ByteStringer;
import com.yahoo.ycsb.ByteArrayByteIterator;
import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DBException;
import com.yahoo.ycsb.Status;

/**
 * Google Bigtable Proto client for YCSB framework.
 *
 * Bigtable offers two APIs. These include a native Protobuf GRPC API as well as
 * an HBase API wrapper for the GRPC API. This client implements the Protobuf
 * API to test the underlying calls wrapped up in the HBase API. To use the
 * HBase API, see the hbase10 client binding.
 */
public class GoogleBigtableClient extends com.yahoo.ycsb.DB {
  public static final Charset UTF8_CHARSET = Charset.forName("UTF8");

  /** Property names for the CLI. */
  private static final String ASYNC_MUTATOR_MAX_MEMORY = "mutatorMaxMemory";
  private static final String ASYNC_MAX_INFLIGHT_RPCS = "mutatorMaxInflightRPCs";
  private static final String CLIENT_SIDE_BUFFERING = "clientbuffering";

  /** Tracks running thread counts so we know when to close the session. */
  private static int threadCount = 0;

  /** This will load the hbase-site.xml config file and/or store CLI options. */
  private static final Configuration CONFIG = HBaseConfiguration.create();

  /** Print debug information to standard out. */
  private boolean debug = false;

  /** Global Bigtable native API objects. */
  private static BigtableOptions options;
  private static BigtableSession session;

  /** Thread local Bigtable native API objects. */
  private BigtableDataClient client;
  private AsyncExecutor asyncExecutor;

  /** The column family used for the workload. */
  private byte[] columnFamilyBytes;

  /** Cache for the last table name/ID to avoid byte conversions. */
  private String lastTable = "";
  private byte[] lastTableBytes;

  /**
   * If true, buffer mutations on the client. For measuring insert/update/delete
   * latencies, client side buffering should be disabled.
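   *
   * <p>Buffered mutations are flushed when the benchmark switches tables (see
   * setTable) and once more on cleanup().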
*/ private boolean clientSideBuffering = false; private BulkMutation bulkMutation; @Override public void init() throws DBException { Properties props = getProperties(); // Defaults the user can override if needed if (getProperties().containsKey(ASYNC_MUTATOR_MAX_MEMORY)) { CONFIG.set(BigtableOptionsFactory.BIGTABLE_BUFFERED_MUTATOR_MAX_MEMORY_KEY, getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY)); } if (getProperties().containsKey(ASYNC_MAX_INFLIGHT_RPCS)) { CONFIG.set(BigtableOptionsFactory.BIGTABLE_BULK_MAX_ROW_KEY_COUNT, getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS)); } // make it easy on ourselves by copying all CLI properties into the config object. final Iterator> it = props.entrySet().iterator(); while (it.hasNext()) { Entry entry = it.next(); CONFIG.set((String)entry.getKey(), (String)entry.getValue()); } clientSideBuffering = getProperties().getProperty(CLIENT_SIDE_BUFFERING, "false") .equals("true") ? true : false; System.err.println("Running Google Bigtable with Proto API" + (clientSideBuffering ? " and client side buffering." : ".")); synchronized (CONFIG) { ++threadCount; if (session == null) { try { options = BigtableOptionsFactory.fromConfiguration(CONFIG); session = new BigtableSession(options); // important to instantiate the first client here, otherwise the // other threads may receive an NPE from the options when they try // to read the cluster name. client = session.getDataClient(); } catch (IOException e) { throw new DBException("Error loading options from config: ", e); } } else { client = session.getDataClient(); } if (clientSideBuffering) { asyncExecutor = session.createAsyncExecutor(); } } if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } final String columnFamily = getProperties().getProperty("columnfamily"); if (columnFamily == null) { System.err.println("Error, must specify a columnfamily for Bigtable table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = Bytes.toBytes(columnFamily); } @Override public void cleanup() throws DBException { if (bulkMutation != null) { try { bulkMutation.flush(); } catch(RuntimeException e){ throw new DBException(e); } } if (asyncExecutor != null) { try { asyncExecutor.flush(); } catch (IOException e) { throw new DBException(e); } } synchronized (CONFIG) { --threadCount; if (threadCount <= 0) { try { session.close(); } catch (IOException e) { throw new DBException(e); } } } } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (debug) { System.out.println("Doing read from Bigtable columnfamily " + new String(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } setTable(table); RowFilter filter = RowFilter.newBuilder() .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes)) .build(); if (fields != null && fields.size() > 0) { Builder filterChain = RowFilter.Chain.newBuilder(); filterChain.addFilters(filter); filterChain.addFilters(RowFilter.newBuilder() .setCellsPerColumnLimitFilter(1) .build()); int count = 0; // usually "field#" so pre-alloc final StringBuilder regex = new StringBuilder(fields.size() * 6); for (final String field : fields) { if (count++ > 0) { regex.append("|"); } regex.append(field); } filterChain.addFilters(RowFilter.newBuilder() .setColumnQualifierRegexFilter( ByteStringer.wrap(regex.toString().getBytes()))).build(); filter = RowFilter.newBuilder().setChain(filterChain.build()).build(); } final 
ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder() .setTableNameBytes(ByteStringer.wrap(lastTableBytes)) .setFilter(filter) .setRows(RowSet.newBuilder() .addRowKeys(ByteStringer.wrap(key.getBytes()))); List rows; try { rows = client.readRowsAsync(rrr.build()).get(); if (rows == null || rows.isEmpty()) { return Status.NOT_FOUND; } for (final Row row : rows) { for (final Family family : row.getFamiliesList()) { if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) { for (final Column column : family.getColumnsList()) { // we should only have a single cell per column result.put(column.getQualifier().toString(UTF8_CHARSET), new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray())); if (debug) { System.out.println( "Result for field: " + column.getQualifier().toString(UTF8_CHARSET) + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET)); } } } } } return Status.OK; } catch (InterruptedException e) { System.err.println("Interrupted during get: " + e); Thread.currentThread().interrupt(); return Status.ERROR; } catch (ExecutionException e) { System.err.println("Exception during get: " + e); return Status.ERROR; } } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { setTable(table); RowFilter filter = RowFilter.newBuilder() .setFamilyNameRegexFilterBytes(ByteStringer.wrap(columnFamilyBytes)) .build(); if (fields != null && fields.size() > 0) { Builder filterChain = RowFilter.Chain.newBuilder(); filterChain.addFilters(filter); filterChain.addFilters(RowFilter.newBuilder() .setCellsPerColumnLimitFilter(1) .build()); int count = 0; // usually "field#" so pre-alloc final StringBuilder regex = new StringBuilder(fields.size() * 6); for (final String field : fields) { if (count++ > 0) { regex.append("|"); } regex.append(field); } filterChain.addFilters(RowFilter.newBuilder() .setColumnQualifierRegexFilter( ByteStringer.wrap(regex.toString().getBytes()))).build(); filter = RowFilter.newBuilder().setChain(filterChain.build()).build(); } final RowRange range = RowRange.newBuilder() .setStartKeyClosed(ByteStringer.wrap(startkey.getBytes())) .build(); final RowSet rowSet = RowSet.newBuilder() .addRowRanges(range) .build(); final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder() .setTableNameBytes(ByteStringer.wrap(lastTableBytes)) .setFilter(filter) .setRows(rowSet); List rows; try { rows = client.readRowsAsync(rrr.build()).get(); if (rows == null || rows.isEmpty()) { return Status.NOT_FOUND; } int numResults = 0; for (final Row row : rows) { final HashMap rowResult = new HashMap(fields != null ? 
fields.size() : 10); for (final Family family : row.getFamiliesList()) { if (Arrays.equals(family.getNameBytes().toByteArray(), columnFamilyBytes)) { for (final Column column : family.getColumnsList()) { // we should only have a single cell per column rowResult.put(column.getQualifier().toString(UTF8_CHARSET), new ByteArrayByteIterator(column.getCells(0).getValue().toByteArray())); if (debug) { System.out.println( "Result for field: " + column.getQualifier().toString(UTF8_CHARSET) + " is: " + column.getCells(0).getValue().toString(UTF8_CHARSET)); } } } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } return Status.OK; } catch (InterruptedException e) { System.err.println("Interrupted during scan: " + e); Thread.currentThread().interrupt(); return Status.ERROR; } catch (ExecutionException e) { System.err.println("Exception during scan: " + e); return Status.ERROR; } } @Override public Status update(String table, String key, - HashMap values) { + Map values) { if (debug) { System.out.println("Setting up put for key: " + key); } setTable(table); final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder(); rowMutation.setRowKey(ByteString.copyFromUtf8(key)); rowMutation.setTableNameBytes(ByteStringer.wrap(lastTableBytes)); for (final Entry entry : values.entrySet()) { final Mutation.Builder mutationBuilder = rowMutation.addMutationsBuilder(); final SetCell.Builder setCellBuilder = mutationBuilder.getSetCellBuilder(); setCellBuilder.setFamilyNameBytes(ByteStringer.wrap(columnFamilyBytes)); setCellBuilder.setColumnQualifier(ByteStringer.wrap(entry.getKey().getBytes())); setCellBuilder.setValue(ByteStringer.wrap(entry.getValue().toArray())); // Bigtable uses a 1ms granularity setCellBuilder.setTimestampMicros(System.currentTimeMillis() * 1000); } try { if (clientSideBuffering) { bulkMutation.add(rowMutation.build()); } else { client.mutateRow(rowMutation.build()); } return Status.OK; } catch (RuntimeException e) { System.err.println("Failed to insert key: " + key + " " + e.getMessage()); return Status.ERROR; } } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } @Override public Status delete(String table, String key) { if (debug) { System.out.println("Doing delete for key: " + key); } setTable(table); final MutateRowRequest.Builder rowMutation = MutateRowRequest.newBuilder() .setRowKey(ByteString.copyFromUtf8(key)) .setTableNameBytes(ByteStringer.wrap(lastTableBytes)); rowMutation.addMutationsBuilder().setDeleteFromRow( DeleteFromRow.getDefaultInstance()); try { if (clientSideBuffering) { bulkMutation.add(rowMutation.build()); } else { client.mutateRow(rowMutation.build()); } return Status.OK; } catch (RuntimeException e) { System.err.println("Failed to delete key: " + key + " " + e.getMessage()); return Status.ERROR; } } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. 
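The update path above stamps every SetCell with System.currentTimeMillis() * 1000, since Bigtable takes timestamps in microseconds but only honors millisecond granularity. A minimal, stand-alone sketch of that conversion (the class and method names here are illustrative, not part of the binding):

public final class TimestampSketch {
  static long nowMicros() {
    // Milliseconds since the epoch, scaled to microseconds; the result is
    // always a multiple of 1000, matching Bigtable's 1ms granularity.
    return System.currentTimeMillis() * 1000L;
  }

  public static void main(String[] args) {
    long micros = nowMicros();
    System.out.println(micros + " us, multiple of 1000: " + (micros % 1000 == 0));
  }
}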
* @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; BigtableTableName tableName = options .getInstanceName() .toTableName(table); lastTableBytes = tableName .toString() .getBytes(); synchronized(this) { if (bulkMutation != null) { bulkMutation.flush(); } bulkMutation = session.createBulkMutation(tableName, asyncExecutor); } } } } \ No newline at end of file diff --git a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java index 7eb35b1e..62db5833 100644 --- a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java +++ b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java @@ -1,335 +1,335 @@ /* * Copyright 2015 YCSB contributors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.google.api.client.auth.oauth2.Credential; import com.google.datastore.v1.*; import com.google.datastore.v1.CommitRequest.Mode; import com.google.datastore.v1.ReadOptions.ReadConsistency; import com.google.datastore.v1.client.Datastore; import com.google.datastore.v1.client.DatastoreException; import com.google.datastore.v1.client.DatastoreFactory; import com.google.datastore.v1.client.DatastoreHelper; import com.google.datastore.v1.client.DatastoreOptions; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.apache.log4j.Level; import org.apache.log4j.Logger; import java.io.IOException; import java.security.GeneralSecurityException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; import javax.annotation.Nullable; /** * Google Cloud Datastore Client for YCSB. */ public class GoogleDatastoreClient extends DB { /** * Defines a MutationType used in this class. */ private enum MutationType { UPSERT, UPDATE, DELETE } /** * Defines an EntityGroupingMode enum used in this class. */ private enum EntityGroupingMode { ONE_ENTITY_PER_GROUP, MULTI_ENTITY_PER_GROUP } private static Logger logger = Logger.getLogger(GoogleDatastoreClient.class); // Read consistency defaults to "STRONG" per YCSB guidance. // The user can override this via configuration. private ReadConsistency readConsistency = ReadConsistency.STRONG; private EntityGroupingMode entityGroupingMode = EntityGroupingMode.ONE_ENTITY_PER_GROUP; private String rootEntityName; private Datastore datastore = null; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread.
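Unlike the datastore client below, which builds one Datastore per DB instance, the Bigtable binding above reference-counts one shared static session across all client threads: init() increments a counter under a lock and lazily creates the session, and cleanup() decrements the counter and closes the session when the last thread leaves. A minimal sketch of that reference-counting shape, using java.io.Closeable as a stand-in for the real session type:

import java.io.Closeable;
import java.io.IOException;

public final class SharedResource {
  private static final Object LOCK = new Object();
  private static int refCount = 0;
  private static Closeable resource = null;

  static Closeable acquire() {
    synchronized (LOCK) {
      if (resource == null) {
        // Stand-in for the expensive shared session created by the first thread in.
        resource = () -> System.out.println("session closed");
      }
      refCount++;
      return resource;
    }
  }

  static void release() throws IOException {
    synchronized (LOCK) {
      if (--refCount <= 0 && resource != null) {
        resource.close(); // last thread out closes the shared session
        resource = null;
      }
    }
  }

  public static void main(String[] args) throws IOException {
    acquire();
    acquire();
    release();
    release(); // prints "session closed" exactly once
  }
}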
*/ @Override public void init() throws DBException { String debug = getProperties().getProperty("googledatastore.debug", null); if (null != debug && "true".equalsIgnoreCase(debug)) { logger.setLevel(Level.DEBUG); } // We need the following three essential properties to initialize datastore: // // - DatasetId, // - Path to private key file, // - Service account email address. String datasetId = getProperties().getProperty( "googledatastore.datasetId", null); if (datasetId == null) { throw new DBException( "Required property \"datasetId\" missing."); } String privateKeyFile = getProperties().getProperty( "googledatastore.privateKeyFile", null); if (privateKeyFile == null) { throw new DBException( "Required property \"privateKeyFile\" missing."); } String serviceAccountEmail = getProperties().getProperty( "googledatastore.serviceAccountEmail", null); if (serviceAccountEmail == null) { throw new DBException( "Required property \"serviceAccountEmail\" missing."); } // Below are properties related to benchmarking. String readConsistencyConfig = getProperties().getProperty( "googledatastore.readConsistency", null); if (readConsistencyConfig != null) { try { this.readConsistency = ReadConsistency.valueOf( readConsistencyConfig.trim().toUpperCase()); } catch (IllegalArgumentException e) { throw new DBException("Invalid read consistency specified: " + readConsistencyConfig + ". Expecting STRONG or EVENTUAL."); } } // // Entity Grouping Mode (googledatastore.entitygroupingmode), see // documentation in conf/googledatastore.properties. // String entityGroupingConfig = getProperties().getProperty( "googledatastore.entityGroupingMode", null); if (entityGroupingConfig != null) { try { this.entityGroupingMode = EntityGroupingMode.valueOf( entityGroupingConfig.trim().toUpperCase()); } catch (IllegalArgumentException e) { throw new DBException("Invalid entity grouping mode specified: " + entityGroupingConfig + ". Expecting ONE_ENTITY_PER_GROUP or " + "MULTI_ENTITY_PER_GROUP."); } } this.rootEntityName = getProperties().getProperty( "googledatastore.rootEntityName", "YCSB_ROOT_ENTITY"); try { // Set up the connection to Google Cloud Datastore with the credentials // obtained from the configuration. DatastoreOptions.Builder options = new DatastoreOptions.Builder(); Credential credential = DatastoreHelper.getServiceAccountCredential( serviceAccountEmail, privateKeyFile); logger.info("Using JWT Service Account credential."); logger.info("DatasetID: " + datasetId + ", Service Account Email: " + serviceAccountEmail + ", Private Key File Path: " + privateKeyFile); datastore = DatastoreFactory.get().create( options.credential(credential).projectId(datasetId).build()); } catch (GeneralSecurityException exception) { throw new DBException("Security error connecting to the datastore: " + exception.getMessage(), exception); } catch (IOException exception) { throw new DBException("I/O error connecting to the datastore: " + exception.getMessage(), exception); } logger.info("Datastore client instance created: " + datastore.toString()); } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { LookupRequest.Builder lookupRequest = LookupRequest.newBuilder(); lookupRequest.addKeys(buildPrimaryKey(table, key)); lookupRequest.getReadOptionsBuilder().setReadConsistency( this.readConsistency); // Note: the datastore lookupRequest above always reads the entire entity; it // does not support reading a subset of "fields" (properties) of an entity.
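The three required-property checks in init() above repeat one pattern: fetch, test for null, throw with the property name. A compact helper expressing that pattern (hypothetical, not part of the client; it throws IllegalArgumentException rather than DBException to stay self-contained):

import java.util.Properties;

public final class PropertyCheck {
  static String require(Properties props, String name) {
    String value = props.getProperty(name);
    if (value == null) {
      // Mirrors the client's error message shape for a missing property.
      throw new IllegalArgumentException(
          "Required property \"" + name + "\" missing.");
    }
    return value;
  }

  public static void main(String[] args) {
    Properties p = new Properties();
    p.setProperty("googledatastore.datasetId", "my-dataset");
    System.out.println(require(p, "googledatastore.datasetId"));
  }
}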
logger.debug("Built lookup request as: " + lookupRequest.toString()); LookupResponse response = null; try { response = datastore.lookup(lookupRequest.build()); } catch (DatastoreException exception) { logger.error( String.format("Datastore Exception when reading (%s): %s %s", exception.getMessage(), exception.getMethodName(), exception.getCode())); // DatastoreException.getCode() returns an HTTP response code which we // will bubble up to the user as part of the YCSB Status "name". return new Status("ERROR-" + exception.getCode(), exception.getMessage()); } if (response.getFoundCount() == 0) { return new Status("ERROR-404", "Not Found, key is: " + key); } else if (response.getFoundCount() > 1) { // We only asked to lookup for one key, shouldn't have got more than one // entity back. Unexpected State. return Status.UNEXPECTED_STATE; } Entity entity = response.getFound(0).getEntity(); logger.debug("Read entity: " + entity.toString()); Map properties = entity.getProperties(); Set propertiesToReturn = (fields == null ? properties.keySet() : fields); for (String name : propertiesToReturn) { if (properties.containsKey(name)) { result.put(name, new StringByteIterator(properties.get(name) .getStringValue())); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // TODO: Implement Scan as query on primary key. return Status.NOT_IMPLEMENTED; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { return doSingleItemMutation(table, key, values, MutationType.UPDATE); } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { // Use Upsert to allow overwrite of existing key instead of failing the // load (or run) just because the DB already has the key. // This is the same behavior as what other DB does here (such as // the DynamoDB client). return doSingleItemMutation(table, key, values, MutationType.UPSERT); } @Override public Status delete(String table, String key) { return doSingleItemMutation(table, key, null, MutationType.DELETE); } private Key.Builder buildPrimaryKey(String table, String key) { Key.Builder result = Key.newBuilder(); if (this.entityGroupingMode == EntityGroupingMode.MULTI_ENTITY_PER_GROUP) { // All entities are in side the same group when we are in this mode. result.addPath(Key.PathElement.newBuilder().setKind(table). setName(rootEntityName)); } return result.addPath(Key.PathElement.newBuilder().setKind(table) .setName(key)); } private Status doSingleItemMutation(String table, String key, - @Nullable HashMap values, + @Nullable Map values, MutationType mutationType) { // First build the key. Key.Builder datastoreKey = buildPrimaryKey(table, key); // Build a commit request in non-transactional mode. // Single item mutation to google datastore // is always atomic and strongly consistent. Transaction is only necessary // for multi-item mutation, or Read-modify-write operation. CommitRequest.Builder commitRequest = CommitRequest.newBuilder(); commitRequest.setMode(Mode.NON_TRANSACTIONAL); if (mutationType == MutationType.DELETE) { commitRequest.addMutationsBuilder().setDelete(datastoreKey); } else { // If this is not for delete, build the entity. 
Entity.Builder entityBuilder = Entity.newBuilder(); entityBuilder.setKey(datastoreKey); for (Entry val : values.entrySet()) { entityBuilder.getMutableProperties() .put(val.getKey(), Value.newBuilder() .setStringValue(val.getValue().toString()).build()); } Entity entity = entityBuilder.build(); logger.debug("entity built as: " + entity.toString()); if (mutationType == MutationType.UPSERT) { commitRequest.addMutationsBuilder().setUpsert(entity); } else if (mutationType == MutationType.UPDATE){ commitRequest.addMutationsBuilder().setUpdate(entity); } else { throw new RuntimeException("Impossible MutationType, code bug."); } } try { datastore.commit(commitRequest.build()); logger.debug("successfully committed."); } catch (DatastoreException exception) { // Catch all Datastore rpc errors. // Log the exception, the name of the method called and the error code. logger.error( String.format("Datastore Exception when committing (%s): %s %s", exception.getMessage(), exception.getMethodName(), exception.getCode())); // DatastoreException.getCode() returns an HTTP response code which we // will bubble up to the user as part of the YCSB Status "name". return new Status("ERROR-" + exception.getCode(), exception.getMessage()); } return Status.OK; } } diff --git a/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java b/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java index 0d62baad..90cbb6a3 100644 --- a/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java +++ b/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java @@ -1,481 +1,482 @@ /** * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.measurements.Measurements; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; /** * HBase client for YCSB framework. */ public class HBaseClient extends com.yahoo.ycsb.DB { private static final Configuration CONFIG = HBaseConfiguration.create(); private static final AtomicInteger THREAD_COUNT = new AtomicInteger(0); private boolean debug = false; private String tableName = ""; private static HConnection hConn = null; private HTableInterface hTable = null; private String columnFamily = ""; private byte[] columnFamilyBytes; private boolean clientSideBuffering = false; private long writeBufferSize = 1024 * 1024 * 12; /** * Whether or not a page filter should be used to limit scan length. */ private boolean usePageFilter = true; private static final Object TABLE_LOCK = new Object(); /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. 
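init() below reads its tuning knobs from YCSB properties with defaults: client-side buffering off, a 12MB write buffer, and the page filter enabled. A stand-alone sketch of that parsing under the same defaults (property names as used by this client):

import java.util.Properties;

public final class HBasePropsSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("clientbuffering", "true");
    // Defaults mirror this client: buffering off, 12MB write buffer, page filter on.
    boolean clientSideBuffering =
        Boolean.parseBoolean(props.getProperty("clientbuffering", "false"));
    long writeBufferSize =
        Long.parseLong(props.getProperty("writebuffersize", String.valueOf(1024 * 1024 * 12)));
    boolean usePageFilter =
        !"false".equals(props.getProperty("hbase.usepagefilter", "true"));
    System.out.println(clientSideBuffering + ", " + writeBufferSize + ", " + usePageFilter);
  }
}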
*/ public void init() throws DBException { if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } if (getProperties().containsKey("clientbuffering")) { clientSideBuffering = Boolean.parseBoolean(getProperties().getProperty("clientbuffering")); } if (getProperties().containsKey("writebuffersize")) { writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize")); } if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) { usePageFilter = false; } if ("kerberos".equalsIgnoreCase(CONFIG.get("hbase.security.authentication"))) { CONFIG.set("hadoop.security.authentication", "Kerberos"); UserGroupInformation.setConfiguration(CONFIG); } if ((getProperties().getProperty("principal") != null) && (getProperties().getProperty("keytab") != null)) { try { UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), getProperties().getProperty("keytab")); } catch (IOException e) { System.err.println("Keytab file is not readable or not found"); throw new DBException(e); } } try { THREAD_COUNT.getAndIncrement(); synchronized (THREAD_COUNT) { if (hConn == null) { hConn = HConnectionManager.createConnection(CONFIG); } } } catch (IOException e) { System.err.println("Connection to HBase was not successful"); throw new DBException(e); } columnFamily = getProperties().getProperty("columnfamily"); if (columnFamily == null) { System.err.println("Error, must specify a columnfamily for HBase tableName"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = Bytes.toBytes(columnFamily); // Terminate right now if tableName does not exist, since the client // will not propagate this error upstream once the workload // starts. String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { HTableInterface ht = hConn.getTable(table); ht.getTableDescriptor(); } catch (IOException e) { throw new DBException(e); } } /** * Cleanup any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void cleanup() throws DBException { // Get the measurements instance as this is the only client that should // count clean up time like an update since autoflush is off. Measurements measurements = Measurements.getMeasurements(); try { long st = System.nanoTime(); if (hTable != null) { hTable.flushCommits(); } synchronized (THREAD_COUNT) { int threadCount = THREAD_COUNT.decrementAndGet(); if (threadCount <= 0 && hConn != null) { hConn.close(); } } long en = System.nanoTime(); measurements.measure("UPDATE", (int) ((en - st) / 1000)); } catch (IOException e) { throw new DBException(e); } } private void getHTable(String table) throws IOException { synchronized (TABLE_LOCK) { hTable = hConn.getTable(table); //2 suggestions from http://ryantwopointoh.blogspot.com/2009/01/performance-of-hbase-importing.html hTable.setAutoFlush(!clientSideBuffering, true); hTable.setWriteBufferSize(writeBufferSize); //return hTable; } } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the tableName * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { //if this is a "new" tableName, init HTable object. Else, use existing one if (!this.tableName.equals(table)) { hTable = null; try { getHTable(table); this.tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase tableName: " + e); return Status.ERROR; } } Result r; try { if (debug) { System.out.println("Doing read from HBase columnfamily " + columnFamily); System.out.println("Doing read for key: " + key); } Get g = new Get(Bytes.toBytes(key)); if (fields == null) { g.addFamily(columnFamilyBytes); } else { for (String field : fields) { g.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } r = hTable.get(g); } catch (IOException e) { System.err.println("Error doing get: " + e); return Status.ERROR; } catch (ConcurrentModificationException e) { //do nothing for now...need to understand HBase concurrency model better return Status.ERROR; } for (KeyValue kv : r.raw()) { result.put( Bytes.toString(kv.getQualifier()), new ByteArrayByteIterator(kv.getValue())); if (debug) { System.out.println("Result for field: " + Bytes.toString(kv.getQualifier()) + " is: " + Bytes.toString(kv.getValue())); } } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the tableName * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { //if this is a "new" tableName, init HTable object. Else, use existing one if (!this.tableName.equals(table)) { hTable = null; try { getHTable(table); this.tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase tableName: " + e); return Status.ERROR; } } Scan s = new Scan(Bytes.toBytes(startkey)); //HBase has no record limit. Here, assume recordcount is small enough to bring back in one call. //We get back recordcount records s.setCaching(recordcount); if (this.usePageFilter) { s.setFilter(new PageFilter(recordcount)); } //add specified fields or else all fields if (fields == null) { s.addFamily(columnFamilyBytes); } else { for (String field : fields) { s.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } //get results try (ResultScanner scanner = hTable.getScanner(s)) { int numResults = 0; for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { //get row key String key = Bytes.toString(rr.getRow()); if (debug) { System.out.println("Got scan result for key: " + key); } HashMap rowResult = new HashMap<>(); for (KeyValue kv : rr.raw()) { rowResult.put( Bytes.toString(kv.getQualifier()), new ByteArrayByteIterator(kv.getValue())); } //add rowResult to result vector result.add(rowResult); numResults++; // PageFilter does not guarantee that the number of results is <= pageSize, so this // break is required. 
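The comment above is worth unpacking: HBase evaluates a PageFilter independently on each region server, so a scan that crosses region boundaries can return more than pageSize rows in total, and the client-side break that follows is what actually enforces recordcount. A generic sketch of that capped-drain loop over any iterator:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public final class CappedScanSketch {
  // Keep at most recordcount results even if the underlying scanner
  // (like a PageFilter-limited HBase scan) over-delivers.
  static <T> int drain(Iterator<T> scanner, int recordcount, List<T> out) {
    int numResults = 0;
    while (scanner.hasNext() && numResults < recordcount) {
      out.add(scanner.next());
      numResults++;
    }
    return numResults;
  }

  public static void main(String[] args) {
    Iterator<String> rows = Arrays.asList("r1", "r2", "r3", "r4").iterator();
    System.out.println(drain(rows, 2, new ArrayList<>()) + " rows kept");
  }
}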
//if hit recordcount, bail out if (numResults >= recordcount) { break; } } //done with row } catch (IOException e) { if (debug) { System.out.println("Error in getting/parsing scan result: " + e); } return Status.ERROR; } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the tableName * @param key The record key of the record to write * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { //if this is a "new" tableName, init HTable object. Else, use existing one if (!this.tableName.equals(table)) { hTable = null; try { getHTable(table); this.tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase tableName: " + e); return Status.ERROR; } } if (debug) { System.out.println("Setting up put for key: " + key); } Put p = new Put(Bytes.toBytes(key)); for (Map.Entry entry : values.entrySet()) { byte[] value = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.toStringBinary(value) + " to put request"); } p.add(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value); } try { hTable.put(p); } catch (IOException e) { if (debug) { System.err.println("Error doing put: " + e); } return Status.ERROR; } catch (ConcurrentModificationException e) { //do nothing for now...hope this is rare return Status.ERROR; } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the tableName * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { return update(table, key, values); } /** * Delete a record from the database. * * @param table The name of the tableName * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { //if this is a "new" tableName, init HTable object. 
Else, use existing one if (!this.tableName.equals(table)) { hTable = null; try { getHTable(table); this.tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase tableName: " + e); return Status.ERROR; } } if (debug) { System.out.println("Doing delete for key: " + key); } Delete d = new Delete(Bytes.toBytes(key)); try { hTable.delete(d); } catch (IOException e) { if (debug) { System.err.println("Error doing delete: " + e); } return Status.ERROR; } return Status.OK; } public static void main(String[] args) { if (args.length != 3) { System.out.println("Please specify a threadcount, columnfamily and operation count"); System.exit(0); } final int keyspace = 10000; //120000000; final int threadcount = Integer.parseInt(args[0]); final String columnfamily = args[1]; final int opcount = Integer.parseInt(args[2]) / threadcount; Vector allthreads = new Vector<>(); for (int i = 0; i < threadcount; i++) { Thread t = new Thread() { public void run() { try { Random random = new Random(); HBaseClient cli = new HBaseClient(); Properties props = new Properties(); props.setProperty("columnfamily", columnfamily); props.setProperty("debug", "true"); cli.setProperties(props); cli.init(); long accum = 0; for (int i = 0; i < opcount; i++) { int keynum = random.nextInt(keyspace); String key = "user" + keynum; long st = System.currentTimeMillis(); Status result; Vector> scanResults = new Vector<>(); + Set scanFields = new HashSet(); result = cli.scan("table1", "user2", 20, null, scanResults); long en = System.currentTimeMillis(); accum += (en - st); if (!result.equals(Status.OK)) { System.out.println("Error " + result + " for " + key); } if (i % 10 == 0) { System.out.println(i + " operations, average latency: " + (((double) accum) / ((double) i))); } } } catch (Exception e) { e.printStackTrace(); } } }; allthreads.add(t); } long st = System.currentTimeMillis(); for (Thread t : allthreads) { t.start(); } for (Thread t : allthreads) { try { t.join(); } catch (InterruptedException ignored) { //ignored } } long en = System.currentTimeMillis(); System.out.println("Throughput: " + ((1000.0) * (((double) (opcount * threadcount)) / ((double) (en - st)))) + " ops/sec"); } } diff --git a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java index 2ef4defa..96d8cf06 100644 --- a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java +++ b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java @@ -1,541 +1,541 @@ /** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.measurements.Measurements; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; /** * HBase 1.0 client for YCSB framework. * * A modified version of HBaseClient (which targets HBase v0.9) utilizing the * HBase 1.0.0 API. * * This client also adds toggleable client-side buffering and configurable write * durability. */ public class HBaseClient10 extends com.yahoo.ycsb.DB { private Configuration config = HBaseConfiguration.create(); private static AtomicInteger threadCount = new AtomicInteger(0); private boolean debug = false; private String tableName = ""; /** * A Cluster Connection instance that is shared by all running ycsb threads. * Needs to be initialized late so we pick up command-line configs if any. * To ensure one instance only in a multi-threaded context, guard access * with a 'lock' object. * @See #CONNECTION_LOCK. */ private static Connection connection = null; private static final Object CONNECTION_LOCK = new Object(); // Depending on the value of clientSideBuffering, either bufferedMutator // (clientSideBuffering) or currentTable (!clientSideBuffering) will be used. private Table currentTable = null; private BufferedMutator bufferedMutator = null; private String columnFamily = ""; private byte[] columnFamilyBytes; /** * Durability to use for puts and deletes. */ private Durability durability = Durability.USE_DEFAULT; /** Whether or not a page filter should be used to limit scan length. */ private boolean usePageFilter = true; /** * If true, buffer mutations on the client. This is the default behavior for * HBaseClient. For measuring insert/update/delete latencies, client side * buffering should be disabled. */ private boolean clientSideBuffering = false; private long writeBufferSize = 1024 * 1024 * 12; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. 
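init() below parses the "durability" property with Durability.valueOf, which throws IllegalArgumentException on an unknown name and aborts the run. A hedged sketch of a more forgiving parse, written against a local stand-in enum with the same constants as the HBase type:

public final class DurabilitySketch {
  enum Durability { USE_DEFAULT, SKIP_WAL, ASYNC_WAL, SYNC_WAL, FSYNC_WAL }

  static Durability parse(String raw) {
    if (raw == null) {
      return Durability.USE_DEFAULT;
    }
    try {
      // Normalize case and whitespace before the enum lookup.
      return Durability.valueOf(raw.trim().toUpperCase());
    } catch (IllegalArgumentException e) {
      return Durability.USE_DEFAULT; // fall back rather than aborting the run
    }
  }

  public static void main(String[] args) {
    System.out.println(parse("skip_wal") + " / " + parse("bogus"));
  }
}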
*/ @Override public void init() throws DBException { if ("true" .equals(getProperties().getProperty("clientbuffering", "false"))) { this.clientSideBuffering = true; } if (getProperties().containsKey("writebuffersize")) { writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize")); } if (getProperties().getProperty("durability") != null) { this.durability = Durability.valueOf(getProperties().getProperty("durability")); } if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) { config.set("hadoop.security.authentication", "Kerberos"); UserGroupInformation.setConfiguration(config); } if ((getProperties().getProperty("principal")!=null) && (getProperties().getProperty("keytab")!=null)) { try { UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"), getProperties().getProperty("keytab")); } catch (IOException e) { System.err.println("Keytab file is not readable or not found"); throw new DBException(e); } } try { threadCount.getAndIncrement(); synchronized (CONNECTION_LOCK) { if (connection == null) { // Initialize if not set up already. connection = ConnectionFactory.createConnection(config); } } } catch (java.io.IOException e) { throw new DBException(e); } if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } if ("false" .equals(getProperties().getProperty("hbase.usepagefilter", "true"))) { usePageFilter = false; } columnFamily = getProperties().getProperty("columnfamily"); if (columnFamily == null) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = Bytes.toBytes(columnFamily); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { final TableName tName = TableName.valueOf(table); synchronized (CONNECTION_LOCK) { connection.getTable(tName).getTableDescriptor(); } } catch (IOException e) { throw new DBException(e); } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { // Get the measurements instance as this is the only client that should // count clean up time like an update if client-side buffering is // enabled. Measurements measurements = Measurements.getMeasurements(); try { long st = System.nanoTime(); if (bufferedMutator != null) { bufferedMutator.close(); } if (currentTable != null) { currentTable.close(); } long en = System.nanoTime(); final String type = clientSideBuffering ? "UPDATE" : "CLEANUP"; measurements.measure(type, (int) ((en - st) / 1000)); threadCount.decrementAndGet(); if (threadCount.get() <= 0) { // Means we are done so ok to shut down the Connection. synchronized (CONNECTION_LOCK) { if (connection != null) { connection.close(); connection = null; } } } } catch (IOException e) { throw new DBException(e); } } public void getHTable(String table) throws IOException { final TableName tName = TableName.valueOf(table); synchronized (CONNECTION_LOCK) { this.currentTable = connection.getTable(tName); if (clientSideBuffering) { final BufferedMutatorParams p = new BufferedMutatorParams(tName); p.writeBufferSize(writeBufferSize); this.bufferedMutator = connection.getBufferedMutator(p); } } } /** * Read a record from the database. 
Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } Result r = null; try { if (debug) { System.out .println("Doing read from HBase columnfamily " + columnFamily); System.out.println("Doing read for key: " + key); } Get g = new Get(Bytes.toBytes(key)); if (fields == null) { g.addFamily(columnFamilyBytes); } else { for (String field : fields) { g.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } r = currentTable.get(g); } catch (IOException e) { if (debug) { System.err.println("Error doing get: " + e); } return Status.ERROR; } catch (ConcurrentModificationException e) { // do nothing for now...need to understand HBase concurrency model better return Status.ERROR; } if (r.isEmpty()) { return Status.NOT_FOUND; } while (r.advance()) { final Cell c = r.current(); result.put(Bytes.toString(CellUtil.cloneQualifier(c)), new ByteArrayByteIterator(CellUtil.cloneValue(c))); if (debug) { System.out.println( "Result for field: " + Bytes.toString(CellUtil.cloneQualifier(c)) + " is: " + Bytes.toString(CellUtil.cloneValue(c))); } } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } Scan s = new Scan(Bytes.toBytes(startkey)); // HBase has no record limit. Here, assume recordcount is small enough to // bring back in one call. 
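Scan.setCaching(n) controls how many rows each scanner RPC ships back, so setting caching to recordcount lets the whole scan ideally complete in one round trip, as the comment above assumes. A back-of-the-envelope check of the RPC count under that assumption:

public final class ScanCachingSketch {
  static long rpcCount(long totalRows, long caching) {
    return (totalRows + caching - 1) / caching; // ceiling division
  }

  public static void main(String[] args) {
    System.out.println(rpcCount(100, 100) + " RPC(s) when caching matches recordcount");
    System.out.println(rpcCount(100, 10) + " RPC(s) when caching is 10");
  }
}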
// We get back recordcount records s.setCaching(recordcount); if (this.usePageFilter) { s.setFilter(new PageFilter(recordcount)); } // add specified fields or else all fields if (fields == null) { s.addFamily(columnFamilyBytes); } else { for (String field : fields) { s.addColumn(columnFamilyBytes, Bytes.toBytes(field)); } } // get results ResultScanner scanner = null; try { scanner = currentTable.getScanner(s); int numResults = 0; for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { // get row key String key = Bytes.toString(rr.getRow()); if (debug) { System.out.println("Got scan result for key: " + key); } HashMap rowResult = new HashMap(); while (rr.advance()) { final Cell cell = rr.current(); rowResult.put(Bytes.toString(CellUtil.cloneQualifier(cell)), new ByteArrayByteIterator(CellUtil.cloneValue(cell))); } // add rowResult to result vector result.add(rowResult); numResults++; // PageFilter does not guarantee that the number of results is <= // pageSize, so this // break is required. if (numResults >= recordcount) {// if hit recordcount, bail out break; } } // done with row } catch (IOException e) { if (debug) { System.out.println("Error in getting/parsing scan result: " + e); } return Status.ERROR; } finally { if (scanner != null) { scanner.close(); } } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } if (debug) { System.out.println("Setting up put for key: " + key); } Put p = new Put(Bytes.toBytes(key)); p.setDurability(durability); for (Map.Entry entry : values.entrySet()) { byte[] value = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.toStringBinary(value) + " to put request"); } p.addColumn(columnFamilyBytes, Bytes.toBytes(entry.getKey()), value); } try { if (clientSideBuffering) { Preconditions.checkNotNull(bufferedMutator); bufferedMutator.mutate(p); } else { currentTable.put(p); } } catch (IOException e) { if (debug) { System.err.println("Error doing put: " + e); } return Status.ERROR; } catch (ConcurrentModificationException e) { // do nothing for now...hope this is rare return Status.ERROR; } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; try { getHTable(table); tableName = table; } catch (IOException e) { System.err.println("Error accessing HBase table: " + e); return Status.ERROR; } } if (debug) { System.out.println("Doing delete for key: " + key); } final Delete d = new Delete(Bytes.toBytes(key)); d.setDurability(durability); try { if (clientSideBuffering) { Preconditions.checkNotNull(bufferedMutator); bufferedMutator.mutate(d); } else { currentTable.delete(d); } } catch (IOException e) { if (debug) { System.err.println("Error doing delete: " + e); } return Status.ERROR; } return Status.OK; } @VisibleForTesting void setConfiguration(final Configuration newConfig) { this.config = newConfig; } } /* * For customized vim control set autoindent set si set shiftwidth=4 */ diff --git a/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java b/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java index f77595ba..040c417b 100644 --- a/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java +++ b/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java @@ -1,213 +1,214 @@ /** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import com.yahoo.ycsb.measurements.Measurements; import com.yahoo.ycsb.workloads.CoreWorkload; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; +import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Vector; /** * Integration tests for the YCSB HBase client 1.0, using an HBase minicluster. 
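The test class below pays the minicluster cost once per class (@BeforeClass/@AfterClass) and re-creates only the cheap per-test state (@Before). A skeletal JUnit 4 rendering of that lifecycle, with a trivial stand-in for the heavyweight cluster resource:

import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class LifecycleSketchTest {
  private static StringBuilder cluster; // stand-in for HBaseTestingUtility
  private String table;

  @BeforeClass
  public static void setUpClass() {
    cluster = new StringBuilder("mini-cluster"); // expensive, so once per class
  }

  @AfterClass
  public static void tearDownClass() {
    cluster = null;
  }

  @Before
  public void setUp() {
    table = "usertable"; // cheap, so re-created per test
  }

  @Test
  public void clusterIsUp() {
    assertTrue(cluster.length() > 0 && table != null);
  }
}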
*/ public class HBaseClient10Test { private final static String COLUMN_FAMILY = "cf"; private static HBaseTestingUtility testingUtil; private HBaseClient10 client; private Table table = null; private String tableName; private static boolean isWindows() { final String os = System.getProperty("os.name"); return os.startsWith("Windows"); } /** * Creates a mini-cluster for use in these tests. * * This is a heavy-weight operation, so invoked only once for the test class. */ @BeforeClass public static void setUpClass() throws Exception { // Minicluster setup fails on Windows with an UnsatisfiedLinkError. // Skip if windows. assumeTrue(!isWindows()); testingUtil = HBaseTestingUtility.createLocalHTU(); testingUtil.startMiniCluster(); } /** * Tears down mini-cluster. */ @AfterClass public static void tearDownClass() throws Exception { if (testingUtil != null) { testingUtil.shutdownMiniCluster(); } } /** * Sets up the mini-cluster for testing. * * We re-create the table for each test. */ @Before public void setUp() throws Exception { client = new HBaseClient10(); client.setConfiguration(new Configuration(testingUtil.getConfiguration())); Properties p = new Properties(); p.setProperty("columnfamily", COLUMN_FAMILY); Measurements.setProperties(p); final CoreWorkload workload = new CoreWorkload(); workload.init(p); tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY)); client.setProperties(p); client.init(); } @After public void tearDown() throws Exception { table.close(); testingUtil.deleteTable(tableName); } @Test public void testRead() throws Exception { final String rowKey = "row1"; final Put p = new Put(Bytes.toBytes(rowKey)); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("column1"), Bytes.toBytes("value1")); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes("column2"), Bytes.toBytes("value2")); table.put(p); final HashMap result = new HashMap(); final Status status = client.read(tableName, rowKey, null, result); assertEquals(Status.OK, status); assertEquals(2, result.size()); assertEquals("value1", result.get("column1").toString()); assertEquals("value2", result.get("column2").toString()); } @Test public void testReadMissingRow() throws Exception { final HashMap result = new HashMap(); final Status status = client.read(tableName, "Missing row", null, result); assertEquals(Status.NOT_FOUND, status); assertEquals(0, result.size()); } @Test public void testScan() throws Exception { // Fill with data final String colStr = "row_number"; final byte[] col = Bytes.toBytes(colStr); final int n = 10; final List puts = new ArrayList(n); for(int i = 0; i < n; i++) { final byte[] key = Bytes.toBytes(String.format("%05d", i)); final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array(); final Put p = new Put(key); p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value); puts.add(p); } table.put(puts); // Test final Vector> result = new Vector>(); // Scan 5 records, skipping the first client.scan(tableName, "00001", 5, null, result); assertEquals(5, result.size()); for(int i = 0; i < 5; i++) { - final HashMap row = result.get(i); + final Map row = result.get(i); assertEquals(1, row.size()); assertTrue(row.containsKey(colStr)); final byte[] bytes = row.get(colStr).toArray(); final ByteBuffer buf = ByteBuffer.wrap(bytes); final int rowNum = buf.getInt(); assertEquals(i + 1, rowNum); } } @Test public void testUpdate() throws Exception{ final String key = "key"; - final HashMap 
input = new HashMap(); + final Map input = new HashMap(); input.put("column1", "value1"); input.put("column2", "value2"); final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); assertEquals(Status.OK, status); // Verify result final Get get = new Get(Bytes.toBytes(key)); final Result result = this.table.get(get); assertFalse(result.isEmpty()); assertEquals(2, result.size()); for(final java.util.Map.Entry entry : input.entrySet()) { assertEquals(entry.getValue(), new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY), Bytes.toBytes(entry.getKey())))); } } @Test @Ignore("Not yet implemented") public void testDelete() { fail("Not yet implemented"); } } diff --git a/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java b/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java index 22b6a2c8..4107ec41 100644 --- a/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java +++ b/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java @@ -1,346 +1,346 @@ /** * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import org.apache.thrift.TException; import org.hypertable.thrift.SerializedCellsFlag; import org.hypertable.thrift.SerializedCellsReader; import org.hypertable.thrift.SerializedCellsWriter; import org.hypertable.thrift.ThriftClient; import org.hypertable.thriftgen.Cell; import org.hypertable.thriftgen.ClientException; import org.hypertable.thriftgen.Key; import org.hypertable.thriftgen.KeyFlag; import org.hypertable.thriftgen.RowInterval; import org.hypertable.thriftgen.ScanSpec; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; /** * Hypertable client for YCSB framework. */ public class HypertableClient extends com.yahoo.ycsb.DB { public static final String NAMESPACE = "/ycsb"; public static final int THRIFTBROKER_PORT = 38080; public static final int BUFFER_SIZE = 4096; private boolean debug = false; private ThriftClient connection; private long ns; private String columnFamily = ""; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. 
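testUpdate above follows a write-then-verify shape: build a map of fields, insert it through the client, fetch the row back, and assert that every entry round-trips. The same shape reduced to plain collections (no HBase involved), as a sanity reference:

import java.util.HashMap;
import java.util.Map;

public final class RoundTripSketch {
  public static void main(String[] args) {
    Map<String, String> input = new HashMap<>();
    input.put("column1", "value1");
    input.put("column2", "value2");

    // Stand-in for the insert followed by the Get-based verification.
    Map<String, String> store = new HashMap<>(input);

    for (Map.Entry<String, String> entry : input.entrySet()) {
      if (!entry.getValue().equals(store.get(entry.getKey()))) {
        throw new AssertionError("Mismatch for " + entry.getKey());
      }
    }
    System.out.println("All " + input.size() + " fields round-tripped.");
  }
}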
*/ @Override public void init() throws DBException { if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").equals("true"))) { debug = true; } try { connection = ThriftClient.create("localhost", THRIFTBROKER_PORT); if (!connection.namespace_exists(NAMESPACE)) { connection.namespace_create(NAMESPACE); } ns = connection.open_namespace(NAMESPACE); } catch (ClientException e) { throw new DBException("Could not open namespace", e); } catch (TException e) { throw new DBException("Could not open namespace", e); } columnFamily = getProperties().getProperty("columnfamily"); if (columnFamily == null) { System.err.println( "Error, must specify a " + "columnfamily for Hypertable table"); throw new DBException("No columnfamily specified"); } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { try { connection.namespace_close(ns); } catch (ClientException e) { throw new DBException("Could not close namespace", e); } catch (TException e) { throw new DBException("Could not close namespace", e); } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { // SELECT _column_family:field[i] // FROM table WHERE ROW=key MAX_VERSIONS 1; if (debug) { System.out .println("Doing read from Hypertable columnfamily " + columnFamily); System.out.println("Doing read for key: " + key); } try { if (null != fields) { Vector> resMap = new Vector>(); if (!scan(table, key, 1, fields, resMap).equals(Status.OK)) { return Status.ERROR; } if (!resMap.isEmpty()) { result.putAll(resMap.firstElement()); } } else { SerializedCellsReader reader = new SerializedCellsReader(null); reader.reset(connection.get_row_serialized(ns, table, key)); while (reader.next()) { result.put(new String(reader.get_column_qualifier()), new ByteArrayByteIterator(reader.get_value())); } } } catch (ClientException e) { if (debug) { System.err.println("Error doing read: " + e.message); } return Status.ERROR; } catch (TException e) { if (debug) { System.err.println("Error doing read"); } return Status.ERROR; } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. 
* @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { // SELECT _columnFamily:fields FROM table WHERE (ROW >= startkey) // LIMIT recordcount MAX_VERSIONS 1; ScanSpec spec = new ScanSpec(); RowInterval elem = new RowInterval(); elem.setStart_inclusive(true); elem.setStart_row(startkey); spec.addToRow_intervals(elem); if (null != fields) { for (String field : fields) { spec.addToColumns(columnFamily + ":" + field); } } spec.setVersions(1); spec.setRow_limit(recordcount); SerializedCellsReader reader = new SerializedCellsReader(null); try { long sc = connection.scanner_open(ns, table, spec); String lastRow = null; boolean eos = false; while (!eos) { reader.reset(connection.scanner_get_cells_serialized(sc)); while (reader.next()) { String currentRow = new String(reader.get_row()); if (!currentRow.equals(lastRow)) { result.add(new HashMap()); lastRow = currentRow; } result.lastElement().put(new String(reader.get_column_qualifier()), new ByteArrayByteIterator(reader.get_value())); } eos = reader.eos(); if (debug) { System.out .println("Number of rows retrieved so far: " + result.size()); } } connection.scanner_close(sc); } catch (ClientException e) { if (debug) { System.err.println("Error doing scan: " + e.message); } return Status.ERROR; } catch (TException e) { if (debug) { System.err.println("Error doing scan"); } return Status.ERROR; } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { return insert(table, key, values); } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { // INSERT INTO table VALUES // (key, _column_family:entry,getKey(), entry.getValue()), (...); if (debug) { System.out.println("Setting up put for key: " + key); } try { long mutator = connection.mutator_open(ns, table, 0, 0); SerializedCellsWriter writer = new SerializedCellsWriter(BUFFER_SIZE * values.size(), true); for (Map.Entry entry : values.entrySet()) { writer.add(key, columnFamily, entry.getKey(), SerializedCellsFlag.AUTO_ASSIGN, ByteBuffer.wrap(entry.getValue().toArray())); } connection.mutator_set_cells_serialized(mutator, writer.buffer(), true); connection.mutator_close(mutator); } catch (ClientException e) { if (debug) { System.err.println("Error doing set: " + e.message); } return Status.ERROR; } catch (TException e) { if (debug) { System.err.println("Error doing set"); } return Status.ERROR; } return Status.OK; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { // DELETE * FROM table WHERE ROW=key; if (debug) { System.out.println("Doing delete for key: " + key); } Cell entry = new Cell(); entry.key = new Key(); entry.key.row = key; entry.key.flag = KeyFlag.DELETE_ROW; try { connection.set_cell(ns, table, entry); } catch (ClientException e) { if (debug) { System.err.println("Error doing delete: " + e.message); } return Status.ERROR; } catch (TException e) { if (debug) { System.err.println("Error doing delete"); } return Status.ERROR; } return Status.OK; } } diff --git a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java index 7fa75fd1..e6373089 100644 --- a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java +++ b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java @@ -1,156 +1,153 @@ /** * Copyright (c) 2012-2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.infinispan.Cache; import org.infinispan.atomic.AtomicMap; import org.infinispan.atomic.AtomicMapLookup; import org.infinispan.manager.DefaultCacheManager; import org.infinispan.manager.EmbeddedCacheManager; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; /** * This is a client implementation for Infinispan 5.x. 
*/ public class InfinispanClient extends DB { private static final Log LOGGER = LogFactory.getLog(InfinispanClient.class); // An optimisation for clustered mode private final boolean clustered; private EmbeddedCacheManager infinispanManager; public InfinispanClient() { clustered = Boolean.getBoolean("infinispan.clustered"); } public void init() throws DBException { try { infinispanManager = new DefaultCacheManager("infinispan-config.xml"); } catch (IOException e) { throw new DBException(e); } } public void cleanup() { infinispanManager.stop(); infinispanManager = null; } - public Status read(String table, String key, Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { Map row; if (clustered) { row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key, false); } else { Cache> cache = infinispanManager.getCache(table); row = cache.get(key); } if (row != null) { result.clear(); if (fields == null || fields.isEmpty()) { StringByteIterator.putAllAsByteIterators(result, row); } else { for (String field : fields) { result.put(field, new StringByteIterator(row.get(field))); } } } return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { LOGGER.warn("Infinispan does not support scan semantics"); return Status.OK; } - public Status update(String table, String key, - HashMap values) { + public Status update(String table, String key, Map values) { try { if (clustered) { AtomicMap row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key); StringByteIterator.putAllAsStrings(row, values); } else { Cache> cache = infinispanManager.getCache(table); Map row = cache.get(key); if (row == null) { row = StringByteIterator.getStringMap(values); cache.put(key, row); } else { StringByteIterator.putAllAsStrings(row, values); } } return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } - public Status insert(String table, String key, - HashMap values) { + public Status insert(String table, String key, Map values) { try { if (clustered) { AtomicMap row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key); row.clear(); StringByteIterator.putAllAsStrings(row, values); } else { infinispanManager.getCache(table).put(key, values); } return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } public Status delete(String table, String key) { try { if (clustered) { AtomicMapLookup.removeAtomicMap(infinispanManager.getCache(table), key); } else { infinispanManager.getCache(table).remove(key); } return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } } diff --git a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java index 26ce8359..d2a535d1 100644 --- a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java +++ b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java @@ -1,139 +1,139 @@ /** * Copyright (c) 2015-2016 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *

* http://www.apache.org/licenses/LICENSE-2.0 *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.*; import org.infinispan.client.hotrod.RemoteCache; import org.infinispan.client.hotrod.RemoteCacheManager; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.Vector; /** * This is a client implementation for Infinispan 5.x in client-server mode. */ public class InfinispanRemoteClient extends DB { private static final Log LOGGER = LogFactory.getLog(InfinispanRemoteClient.class); private RemoteCacheManager remoteIspnManager; private String cacheName = null; @Override public void init() throws DBException { remoteIspnManager = RemoteCacheManagerHolder.getInstance(getProperties()); cacheName = getProperties().getProperty("cache"); } @Override public void cleanup() { remoteIspnManager.stop(); remoteIspnManager = null; } @Override - public Status insert(String table, String recordKey, HashMap values) { + public Status insert(String table, String recordKey, Map values) { String compositKey = createKey(table, recordKey); Map stringValues = new HashMap<>(); StringByteIterator.putAllAsStrings(stringValues, values); try { cache().put(compositKey, stringValues); return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override - public Status read(String table, String recordKey, Set fields, HashMap result) { + public Status read(String table, String recordKey, Set fields, Map result) { String compositKey = createKey(table, recordKey); try { Map values = cache().get(compositKey); if (values == null || values.isEmpty()) { return Status.NOT_FOUND; } if (fields == null) { //get all field/value pairs StringByteIterator.putAllAsByteIterators(result, values); } else { for (String field : fields) { String value = values.get(field); if (value != null) { result.put(field, new StringByteIterator(value)); } } } return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { LOGGER.warn("Infinispan does not support scan semantics"); return Status.NOT_IMPLEMENTED; } @Override - public Status update(String table, String recordKey, HashMap values) { + public Status update(String table, String recordKey, Map values) { String compositKey = createKey(table, recordKey); try { Map stringValues = new HashMap<>(); StringByteIterator.putAllAsStrings(stringValues, values); cache().put(compositKey, stringValues); return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } @Override public Status delete(String table, String recordKey) { String compositKey = createKey(table, recordKey); try { cache().remove(compositKey); return Status.OK; } catch (Exception e) { LOGGER.error(e); return Status.ERROR; } } private RemoteCache> cache() { if (this.cacheName != null) { return remoteIspnManager.getCache(cacheName); } else { return remoteIspnManager.getCache(); } } private String createKey(String table, String recordKey) { return table + "-" + recordKey; } } diff --git a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java 
b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java index aeaf431f..294fa096 100644 --- a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java +++ b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java @@ -1,501 +1,501 @@ /** * Copyright (c) 2010 - 2016 Yahoo! Inc., 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import java.sql.*; import java.util.*; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import com.yahoo.ycsb.db.flavors.DBFlavor; /** * A class that wraps a JDBC compliant database to allow it to be interfaced * with YCSB. This class extends {@link DB} and implements the database * interface used by YCSB client. * *
* Each client will have its own instance of this class. This client is not * thread safe. * *
* This interface expects a schema <key> <field1> <field2> <field3> ... All * attributes are of type VARCHAR. All accesses are through the primary key. Therefore, only one index on the primary key is needed. */ public class JdbcDBClient extends DB { /** The class to use as the jdbc driver. */ public static final String DRIVER_CLASS = "db.driver"; /** The URL to connect to the database. */ public static final String CONNECTION_URL = "db.url"; /** The user name to use to connect to the database. */ public static final String CONNECTION_USER = "db.user"; /** The password to use for establishing the connection. */ public static final String CONNECTION_PASSWD = "db.passwd"; /** The batch size for batched inserts. Set to >0 to use batching */ public static final String DB_BATCH_SIZE = "db.batchsize"; /** The JDBC fetch size hinted to the driver. */ public static final String JDBC_FETCH_SIZE = "jdbc.fetchsize"; /** The JDBC connection auto-commit property for the driver. */ public static final String JDBC_AUTO_COMMIT = "jdbc.autocommit"; public static final String JDBC_BATCH_UPDATES = "jdbc.batchupdateapi"; /** The name of the property for the number of fields in a record. */ public static final String FIELD_COUNT_PROPERTY = "fieldcount"; /** Default number of fields in a record. */ public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10"; /** Representing a NULL value. */ public static final String NULL_VALUE = "NULL"; /** The primary key in the user table. */ public static final String PRIMARY_KEY = "YCSB_KEY"; /** The field name prefix in the table. */ public static final String COLUMN_PREFIX = "FIELD"; - private ArrayList conns; + private List conns; private boolean initialized = false; private Properties props; private int jdbcFetchSize; private int batchSize; private boolean autoCommit; private boolean batchUpdates; private static final String DEFAULT_PROP = ""; private ConcurrentMap cachedStatements; private long numRowsInBatch = 0; /** DB flavor defines DB-specific syntax and behavior for the * particular database. Current database flavors are: {default, phoenix} */ private DBFlavor dbFlavor; /** * Ordered field information for insert and update statements. */ private static class OrderedFieldInfo { private String fieldKeys; private List fieldValues; OrderedFieldInfo(String fieldKeys, List fieldValues) { this.fieldKeys = fieldKeys; this.fieldValues = fieldValues; } String getFieldKeys() { return fieldKeys; } List getFieldValues() { return fieldValues; } } /** * For the given key, returns what shard contains data for this key. * * @param key Data key to do operation on * @return Shard index */ private int getShardIndexByKey(String key) { int ret = Math.abs(key.hashCode()) % conns.size(); return ret; } /** * For the given key, returns Connection object that holds connection to the * shard that contains this key. * * @param key Data key to get information for * @return Connection object */ private Connection getShardConnectionByKey(String key) { return conns.get(getShardIndexByKey(key)); } private void cleanupAllConnections() throws SQLException { for (Connection conn : conns) { if (!autoCommit) { conn.commit(); } conn.close(); } } /** Returns parsed int value from the properties if set, otherwise returns -1. 
*/ private static int getIntProperty(Properties props, String key) throws DBException { String valueStr = props.getProperty(key); if (valueStr != null) { try { return Integer.parseInt(valueStr); } catch (NumberFormatException nfe) { System.err.println("Invalid " + key + " specified: " + valueStr); throw new DBException(nfe); } } return -1; } /** Returns parsed boolean value from the properties if set, otherwise returns defaultVal. */ private static boolean getBoolProperty(Properties props, String key, boolean defaultVal) { String valueStr = props.getProperty(key); if (valueStr != null) { return Boolean.parseBoolean(valueStr); } return defaultVal; } @Override public void init() throws DBException { if (initialized) { System.err.println("Client connection already initialized."); return; } props = getProperties(); String urls = props.getProperty(CONNECTION_URL, DEFAULT_PROP); String user = props.getProperty(CONNECTION_USER, DEFAULT_PROP); String passwd = props.getProperty(CONNECTION_PASSWD, DEFAULT_PROP); String driver = props.getProperty(DRIVER_CLASS); this.jdbcFetchSize = getIntProperty(props, JDBC_FETCH_SIZE); this.batchSize = getIntProperty(props, DB_BATCH_SIZE); this.autoCommit = getBoolProperty(props, JDBC_AUTO_COMMIT, true); this.batchUpdates = getBoolProperty(props, JDBC_BATCH_UPDATES, false); try { if (driver != null) { Class.forName(driver); } int shardCount = 0; conns = new ArrayList(3); final String[] urlArr = urls.split(","); for (String url : urlArr) { System.out.println("Adding shard node URL: " + url); Connection conn = DriverManager.getConnection(url, user, passwd); // Since there is no explicit commit method in the DB interface, all // operations should auto commit, except when explicitly told not to // (this is necessary in cases such as for PostgreSQL when running a // scan workload with fetchSize) conn.setAutoCommit(autoCommit); shardCount++; conns.add(conn); } System.out.println("Using shards: " + shardCount + ", batchSize:" + batchSize + ", fetchSize: " + jdbcFetchSize); cachedStatements = new ConcurrentHashMap(); this.dbFlavor = DBFlavor.fromJdbcUrl(urlArr[0]); } catch (ClassNotFoundException e) { System.err.println("Error in initializing the JDBC driver: " + e); throw new DBException(e); } catch (SQLException e) { System.err.println("Error in database operation: " + e); throw new DBException(e); } catch (NumberFormatException e) { System.err.println("Invalid value for fieldcount property. " + e); throw new DBException(e); } initialized = true; } @Override public void cleanup() throws DBException { if (batchSize > 0) { try { // commit un-finished batches for (PreparedStatement st : cachedStatements.values()) { if (!st.getConnection().isClosed() && !st.isClosed() && (numRowsInBatch % batchSize != 0)) { st.executeBatch(); } } } catch (SQLException e) { System.err.println("Error in cleanup execution. " + e); throw new DBException(e); } } try { cleanupAllConnections(); } catch (SQLException e) { System.err.println("Error in closing the connection. 
" + e); throw new DBException(e); } } private PreparedStatement createAndCacheInsertStatement(StatementType insertType, String key) throws SQLException { String insert = dbFlavor.createInsertStatement(insertType, key); PreparedStatement insertStatement = getShardConnectionByKey(key).prepareStatement(insert); PreparedStatement stmt = cachedStatements.putIfAbsent(insertType, insertStatement); if (stmt == null) { return insertStatement; } return stmt; } private PreparedStatement createAndCacheReadStatement(StatementType readType, String key) throws SQLException { String read = dbFlavor.createReadStatement(readType, key); PreparedStatement readStatement = getShardConnectionByKey(key).prepareStatement(read); PreparedStatement stmt = cachedStatements.putIfAbsent(readType, readStatement); if (stmt == null) { return readStatement; } return stmt; } private PreparedStatement createAndCacheDeleteStatement(StatementType deleteType, String key) throws SQLException { String delete = dbFlavor.createDeleteStatement(deleteType, key); PreparedStatement deleteStatement = getShardConnectionByKey(key).prepareStatement(delete); PreparedStatement stmt = cachedStatements.putIfAbsent(deleteType, deleteStatement); if (stmt == null) { return deleteStatement; } return stmt; } private PreparedStatement createAndCacheUpdateStatement(StatementType updateType, String key) throws SQLException { String update = dbFlavor.createUpdateStatement(updateType, key); PreparedStatement insertStatement = getShardConnectionByKey(key).prepareStatement(update); PreparedStatement stmt = cachedStatements.putIfAbsent(updateType, insertStatement); if (stmt == null) { return insertStatement; } return stmt; } private PreparedStatement createAndCacheScanStatement(StatementType scanType, String key) throws SQLException { String select = dbFlavor.createScanStatement(scanType, key); PreparedStatement scanStatement = getShardConnectionByKey(key).prepareStatement(select); if (this.jdbcFetchSize > 0) { scanStatement.setFetchSize(this.jdbcFetchSize); } PreparedStatement stmt = cachedStatements.putIfAbsent(scanType, scanStatement); if (stmt == null) { return scanStatement; } return stmt; } @Override - public Status read(String tableName, String key, Set fields, HashMap result) { + public Status read(String tableName, String key, Set fields, Map result) { try { StatementType type = new StatementType(StatementType.Type.READ, tableName, 1, "", getShardIndexByKey(key)); PreparedStatement readStatement = cachedStatements.get(type); if (readStatement == null) { readStatement = createAndCacheReadStatement(type, key); } readStatement.setString(1, key); ResultSet resultSet = readStatement.executeQuery(); if (!resultSet.next()) { resultSet.close(); return Status.NOT_FOUND; } if (result != null && fields != null) { for (String field : fields) { String value = resultSet.getString(field); result.put(field, new StringByteIterator(value)); } } resultSet.close(); return Status.OK; } catch (SQLException e) { System.err.println("Error in processing read of table " + tableName + ": " + e); return Status.ERROR; } } @Override public Status scan(String tableName, String startKey, int recordcount, Set fields, Vector> result) { try { StatementType type = new StatementType(StatementType.Type.SCAN, tableName, 1, "", getShardIndexByKey(startKey)); PreparedStatement scanStatement = cachedStatements.get(type); if (scanStatement == null) { scanStatement = createAndCacheScanStatement(type, startKey); } scanStatement.setString(1, startKey); scanStatement.setInt(2, recordcount); 
ResultSet resultSet = scanStatement.executeQuery(); for (int i = 0; i < recordcount && resultSet.next(); i++) { if (result != null && fields != null) { HashMap values = new HashMap(); for (String field : fields) { String value = resultSet.getString(field); values.put(field, new StringByteIterator(value)); } result.add(values); } } resultSet.close(); return Status.OK; } catch (SQLException e) { System.err.println("Error in processing scan of table: " + tableName + e); return Status.ERROR; } } @Override - public Status update(String tableName, String key, HashMap values) { + public Status update(String tableName, String key, Map values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); StatementType type = new StatementType(StatementType.Type.UPDATE, tableName, numFields, fieldInfo.getFieldKeys(), getShardIndexByKey(key)); PreparedStatement updateStatement = cachedStatements.get(type); if (updateStatement == null) { updateStatement = createAndCacheUpdateStatement(type, key); } int index = 1; for (String value: fieldInfo.getFieldValues()) { updateStatement.setString(index++, value); } updateStatement.setString(index, key); int result = updateStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing update to table: " + tableName + e); return Status.ERROR; } } @Override - public Status insert(String tableName, String key, HashMap values) { + public Status insert(String tableName, String key, Map values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); StatementType type = new StatementType(StatementType.Type.INSERT, tableName, numFields, fieldInfo.getFieldKeys(), getShardIndexByKey(key)); PreparedStatement insertStatement = cachedStatements.get(type); if (insertStatement == null) { insertStatement = createAndCacheInsertStatement(type, key); } insertStatement.setString(1, key); int index = 2; for (String value: fieldInfo.getFieldValues()) { insertStatement.setString(index++, value); } // Using the batch insert API if (batchUpdates) { insertStatement.addBatch(); // Check for a sane batch size if (batchSize > 0) { // Commit the batch after it grows beyond the configured size if (++numRowsInBatch % batchSize == 0) { int[] results = insertStatement.executeBatch(); for (int r : results) { if (r != 1) { return Status.ERROR; } } // If autoCommit is off, make sure we commit the batch if (!autoCommit) { getShardConnectionByKey(key).commit(); } return Status.OK; } // else, the default value of -1 or a nonsense. Treat it as an infinitely large batch. } // else, we let the batch accumulate // Added element to the batch, potentially committing the batch too. 
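// Nothing is guaranteed to be on the server at this point: rows sit in the
// JDBC batch until it reaches batchSize (or until cleanup() flushes the
// remainder), which is why BATCHED_OK rather than OK is reported here.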
return Status.BATCHED_OK; } else { // Normal update int result = insertStatement.executeUpdate(); // If we are not autoCommit, we might have to commit now if (!autoCommit) { // Let updates be batched locally if (batchSize > 0) { if (++numRowsInBatch % batchSize == 0) { // Send the batch of updates getShardConnectionByKey(key).commit(); } // Rows between commits are flushed by a later batch commit or by cleanup(). return Status.OK; } else { // Commit each update getShardConnectionByKey(key).commit(); } } if (result == 1) { return Status.OK; } } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing insert to table: " + tableName + e); return Status.ERROR; } } @Override public Status delete(String tableName, String key) { try { StatementType type = new StatementType(StatementType.Type.DELETE, tableName, 1, "", getShardIndexByKey(key)); PreparedStatement deleteStatement = cachedStatements.get(type); if (deleteStatement == null) { deleteStatement = createAndCacheDeleteStatement(type, key); } deleteStatement.setString(1, key); int result = deleteStatement.executeUpdate(); if (result == 1) { return Status.OK; } return Status.UNEXPECTED_STATE; } catch (SQLException e) { System.err.println("Error in processing delete to table: " + tableName + e); return Status.ERROR; } } - private OrderedFieldInfo getFieldInfo(HashMap values) { + private OrderedFieldInfo getFieldInfo(Map values) { String fieldKeys = ""; List fieldValues = new ArrayList<>(); int count = 0; for (Map.Entry entry : values.entrySet()) { fieldKeys += entry.getKey(); if (count < values.size() - 1) { fieldKeys += ","; } fieldValues.add(count, entry.getValue().toString()); count++; } return new OrderedFieldInfo(fieldKeys, fieldValues); } } diff --git a/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java b/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java index d298f9e4..212ebc0e 100644 --- a/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java +++ b/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java @@ -1,391 +1,393 @@ /** * Copyright (c) 2015 - 2016 Yahoo! Inc., 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import static org.junit.Assert.*; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.StringByteIterator; import org.junit.*; import java.sql.*; import java.util.HashMap; +import java.util.Map; import java.util.HashSet; +import java.util.Set; import java.util.Properties; import java.util.Vector; public class JdbcDBClientTest { private static final String TEST_DB_DRIVER = "org.hsqldb.jdbc.JDBCDriver"; private static final String TEST_DB_URL = "jdbc:hsqldb:mem:ycsb"; private static final String TEST_DB_USER = "sa"; private static final String TABLE_NAME = "USERTABLE"; private static final int FIELD_LENGTH = 32; private static final String FIELD_PREFIX = "FIELD"; private static final String KEY_PREFIX = "user"; private static final String KEY_FIELD = "YCSB_KEY"; private static final int NUM_FIELDS = 3; private static Connection jdbcConnection = null; private static JdbcDBClient jdbcDBClient = null; @BeforeClass public static void setup() { setupWithBatch(1, true); } public static void setupWithBatch(int batchSize, boolean autoCommit) { try { jdbcConnection = DriverManager.getConnection(TEST_DB_URL); jdbcDBClient = new JdbcDBClient(); Properties p = new Properties(); p.setProperty(JdbcDBClient.CONNECTION_URL, TEST_DB_URL); p.setProperty(JdbcDBClient.DRIVER_CLASS, TEST_DB_DRIVER); p.setProperty(JdbcDBClient.CONNECTION_USER, TEST_DB_USER); p.setProperty(JdbcDBClient.DB_BATCH_SIZE, Integer.toString(batchSize)); p.setProperty(JdbcDBClient.JDBC_BATCH_UPDATES, "true"); p.setProperty(JdbcDBClient.JDBC_AUTO_COMMIT, Boolean.toString(autoCommit)); jdbcDBClient.setProperties(p); jdbcDBClient.init(); } catch (SQLException e) { e.printStackTrace(); fail("Could not create local Database"); } catch (DBException e) { e.printStackTrace(); fail("Could not create JdbcDBClient instance"); } } @AfterClass public static void teardown() { try { if (jdbcConnection != null) { jdbcConnection.close(); } } catch (SQLException e) { e.printStackTrace(); } try { if (jdbcDBClient != null) { jdbcDBClient.cleanup(); } } catch (DBException e) { e.printStackTrace(); } } @Before public void prepareTest() { try { DatabaseMetaData metaData = jdbcConnection.getMetaData(); ResultSet tableResults = metaData.getTables(null, null, TABLE_NAME, null); if (tableResults.next()) { // If the table already exists, just truncate it jdbcConnection.prepareStatement( String.format("TRUNCATE TABLE %s", TABLE_NAME) ).execute(); } else { // If the table does not exist then create it StringBuilder createString = new StringBuilder( String.format("CREATE TABLE %s (%s VARCHAR(100) PRIMARY KEY", TABLE_NAME, KEY_FIELD) ); for (int i = 0; i < NUM_FIELDS; i++) { createString.append( String.format(", %s%d VARCHAR(100)", FIELD_PREFIX, i) ); } createString.append(")"); jdbcConnection.prepareStatement(createString.toString()).execute(); } } catch (SQLException e) { e.printStackTrace(); fail("Failed to prepare test"); } } /* This is a copy of buildDeterministicValue() from core:com.yahoo.ycsb.workloads.CoreWorkload.java. That method is neither public nor static so we need a copy. */ private String buildDeterministicValue(String key, String fieldkey) { int size = FIELD_LENGTH; StringBuilder sb = new StringBuilder(size); sb.append(key); sb.append(':'); sb.append(fieldkey); while (sb.length() < size) { sb.append(':'); sb.append(sb.toString().hashCode()); } sb.setLength(size); return sb.toString(); } /* Inserts a row of deterministic values for the given insertKey using the jdbcDBClient. 
*/ private HashMap insertRow(String insertKey) { HashMap insertMap = new HashMap(); for (int i = 0; i < 3; i++) { insertMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue(insertKey, FIELD_PREFIX + i))); } jdbcDBClient.insert(TABLE_NAME, insertKey, insertMap); return insertMap; } @Test public void insertTest() { try { String insertKey = "user0"; HashMap insertMap = insertRow(insertKey); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); // Check we have a result Row assertTrue(resultSet.next()); // Check that all the columns have expected values assertEquals(resultSet.getString(KEY_FIELD), insertKey); for (int i = 0; i < 3; i++) { assertEquals(resultSet.getString(FIELD_PREFIX + i), insertMap.get(FIELD_PREFIX + i).toString()); } // Check that we do not have any more rows assertFalse(resultSet.next()); resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed insertTest"); } } @Test public void updateTest() { try { String preupdateString = "preupdate"; StringBuilder fauxInsertString = new StringBuilder( String.format("INSERT INTO %s VALUES(?", TABLE_NAME) ); for (int i = 0; i < NUM_FIELDS; i++) { fauxInsertString.append(",?"); } fauxInsertString.append(")"); PreparedStatement fauxInsertStatement = jdbcConnection.prepareStatement(fauxInsertString.toString()); for (int i = 2; i < NUM_FIELDS + 2; i++) { fauxInsertStatement.setString(i, preupdateString); } fauxInsertStatement.setString(1, "user0"); fauxInsertStatement.execute(); fauxInsertStatement.setString(1, "user1"); fauxInsertStatement.execute(); fauxInsertStatement.setString(1, "user2"); fauxInsertStatement.execute(); HashMap updateMap = new HashMap(); for (int i = 0; i < 3; i++) { updateMap.put(FIELD_PREFIX + i, new StringByteIterator(buildDeterministicValue("user1", FIELD_PREFIX + i))); } jdbcDBClient.update(TABLE_NAME, "user1", updateMap); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s ORDER BY %s", TABLE_NAME, KEY_FIELD) ).executeQuery(); // Ensure that user0 record was not changed resultSet.next(); assertEquals("Assert first row key is user0", resultSet.getString(KEY_FIELD), "user0"); for (int i = 0; i < 3; i++) { assertEquals("Assert first row fields contain preupdateString", resultSet.getString(FIELD_PREFIX + i), preupdateString); } // Check that all the columns have expected values for user1 record resultSet.next(); assertEquals(resultSet.getString(KEY_FIELD), "user1"); for (int i = 0; i < 3; i++) { assertEquals(resultSet.getString(FIELD_PREFIX + i), updateMap.get(FIELD_PREFIX + i).toString()); } // Ensure that user2 record was not changed resultSet.next(); assertEquals("Assert third row key is user2", resultSet.getString(KEY_FIELD), "user2"); for (int i = 0; i < 3; i++) { assertEquals("Assert third row fields contain preupdateString", resultSet.getString(FIELD_PREFIX + i), preupdateString); } resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed updateTest"); } } @Test public void readTest() { String insertKey = "user0"; HashMap insertMap = insertRow(insertKey); - HashSet readFields = new HashSet(); + Set readFields = new HashSet(); HashMap readResultMap = new HashMap(); // Test reading a single field readFields.add("FIELD0"); jdbcDBClient.read(TABLE_NAME, insertKey, readFields, readResultMap); assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size()); for (String field: readFields) { assertEquals("Assert " + 
field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString()); } readResultMap = new HashMap(); // Test reading all fields readFields.add("FIELD1"); readFields.add("FIELD2"); jdbcDBClient.read(TABLE_NAME, insertKey, readFields, readResultMap); assertEquals("Assert that result has correct number of fields", readFields.size(), readResultMap.size()); for (String field: readFields) { assertEquals("Assert " + field + " was read correctly", insertMap.get(field).toString(), readResultMap.get(field).toString()); } } @Test public void deleteTest() { try { insertRow("user0"); String deleteKey = "user1"; insertRow(deleteKey); insertRow("user2"); jdbcDBClient.delete(TABLE_NAME, deleteKey); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); int totalRows = 0; while (resultSet.next()) { assertNotEquals("Assert this is not the deleted row key", deleteKey, resultSet.getString(KEY_FIELD)); totalRows++; } // Check we do not have a result Row assertEquals("Assert we ended with the correct number of rows", totalRows, 2); resultSet.close(); } catch (SQLException e) { e.printStackTrace(); fail("Failed deleteTest"); } } @Test public void scanTest() throws SQLException { - HashMap> keyMap = new HashMap>(); + Map> keyMap = new HashMap>(); for (int i = 0; i < 5; i++) { String insertKey = KEY_PREFIX + i; keyMap.put(insertKey, insertRow(insertKey)); } - HashSet fieldSet = new HashSet(); + Set fieldSet = new HashSet(); fieldSet.add("FIELD0"); fieldSet.add("FIELD1"); int startIndex = 1; int resultRows = 3; Vector> resultVector = new Vector>(); jdbcDBClient.scan(TABLE_NAME, KEY_PREFIX + startIndex, resultRows, fieldSet, resultVector); // Check the resultVector is the correct size assertEquals("Assert the correct number of results rows were returned", resultRows, resultVector.size()); // Check each vector row to make sure we have the correct fields int testIndex = startIndex; - for (HashMap result: resultVector) { + for (Map result: resultVector) { assertEquals("Assert that this row has the correct number of fields", fieldSet.size(), result.size()); for (String field: fieldSet) { assertEquals("Assert this field is correct in this row", keyMap.get(KEY_PREFIX + testIndex).get(field).toString(), result.get(field).toString()); } testIndex++; } } @Test public void insertBatchTest() throws DBException { insertBatchTest(20); } @Test public void insertPartialBatchTest() throws DBException { insertBatchTest(19); } public void insertBatchTest(int numRows) throws DBException { teardown(); setupWithBatch(10, false); try { String insertKey = "user0"; HashMap insertMap = insertRow(insertKey); assertEquals(3, insertMap.size()); ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); // Check we do not have a result Row (because batch is not full yet) assertFalse(resultSet.next()); // insert more rows, completing 1 batch (still results are partial). 
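// This test runs with setupWithBatch(10, false), so the loop below only
// flushes whole batches of 10; e.g. for numRows = 19 the trailing 9 rows
// stay buffered until jdbcDBClient.cleanup() commits them.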
for (int i = 1; i < numRows; i++) { insertMap = insertRow("user" + i); } // assertNumRows(10 * (numRows / 10)); // call cleanup, which should insert the partial batch jdbcDBClient.cleanup(); // Prevent a teardown() from printing an error jdbcDBClient = null; // Check that we have all rows assertNumRows(numRows); } catch (SQLException e) { e.printStackTrace(); fail("Failed insertBatchTest"); } finally { teardown(); // for next tests setup(); } } private void assertNumRows(long numRows) throws SQLException { ResultSet resultSet = jdbcConnection.prepareStatement( String.format("SELECT * FROM %s", TABLE_NAME) ).executeQuery(); for (int i = 0; i < numRows; i++) { assertTrue("expecting " + numRows + " results, received only " + i, resultSet.next()); } assertFalse("expecting " + numRows + " results, received more", resultSet.next()); resultSet.close(); } } diff --git a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java index b7ae0e2b..ebd6a822 100644 --- a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java +++ b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java @@ -1,321 +1,320 @@ /** * Copyright (c) 2015-2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.stumbleupon.async.TimeoutException; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import com.yahoo.ycsb.workloads.CoreWorkload; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.kudu.ColumnSchema; import org.apache.kudu.Schema; import org.apache.kudu.client.*; import java.util.ArrayList; import java.util.HashMap; +import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.Vector; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; import static org.apache.kudu.Type.STRING; import static org.apache.kudu.client.KuduPredicate.ComparisonOp.EQUAL; import static org.apache.kudu.client.KuduPredicate.ComparisonOp.GREATER_EQUAL; /** * Kudu client for YCSB framework. Example to load:
 *
 * <pre>
 * $ ./bin/ycsb load kudu -P workloads/workloada -threads 5
 * </pre>
 *
 * Example to run:
 *
 * <pre>
 * ./bin/ycsb run kudu -P workloads/workloada -p kudu_sync_ops=true -threads 5
 * </pre>
 *
*/ public class KuduYCSBClient extends com.yahoo.ycsb.DB { private static final Logger LOG = LoggerFactory.getLogger(KuduYCSBClient.class); private static final String KEY = "key"; private static final Status TIMEOUT = new Status("TIMEOUT", "The operation timed out."); private static final int MAX_TABLETS = 9000; private static final long DEFAULT_SLEEP = 60000; private static final String SYNC_OPS_OPT = "kudu_sync_ops"; private static final String PRE_SPLIT_NUM_TABLETS_OPT = "kudu_pre_split_num_tablets"; private static final String TABLE_NUM_REPLICAS = "kudu_table_num_replicas"; private static final String BLOCK_SIZE_OPT = "kudu_block_size"; private static final String MASTER_ADDRESSES_OPT = "kudu_master_addresses"; private static final int BLOCK_SIZE_DEFAULT = 4096; private static final List COLUMN_NAMES = new ArrayList<>(); private static KuduClient client; private static Schema schema; private KuduSession session; private KuduTable kuduTable; @Override public void init() throws DBException { String tableName = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); initClient(tableName, getProperties()); this.session = client.newSession(); if (getProperties().getProperty(SYNC_OPS_OPT) != null && getProperties().getProperty(SYNC_OPS_OPT).equals("false")) { this.session.setFlushMode(KuduSession.FlushMode.AUTO_FLUSH_BACKGROUND); this.session.setMutationBufferSpace(100); } else { this.session.setFlushMode(KuduSession.FlushMode.AUTO_FLUSH_SYNC); } try { this.kuduTable = client.openTable(tableName); } catch (Exception e) { throw new DBException("Could not open a table because of:", e); } } private static synchronized void initClient(String tableName, Properties prop) throws DBException { if (client != null) { return; } String masterAddresses = prop.getProperty(MASTER_ADDRESSES_OPT); if (masterAddresses == null) { masterAddresses = "localhost:7051"; } int numTablets = getIntFromProp(prop, PRE_SPLIT_NUM_TABLETS_OPT, 4); if (numTablets > MAX_TABLETS) { throw new DBException(String.format( "Specified number of tablets (%s) must be equal or below %s", numTablets, MAX_TABLETS)); } int numReplicas = getIntFromProp(prop, TABLE_NUM_REPLICAS, 3); int blockSize = getIntFromProp(prop, BLOCK_SIZE_OPT, BLOCK_SIZE_DEFAULT); client = new KuduClient.KuduClientBuilder(masterAddresses) .defaultSocketReadTimeoutMs(DEFAULT_SLEEP) .defaultOperationTimeoutMs(DEFAULT_SLEEP) .defaultAdminOperationTimeoutMs(DEFAULT_SLEEP) .build(); LOG.debug("Connecting to the masters at {}", masterAddresses); int fieldCount = getIntFromProp(prop, CoreWorkload.FIELD_COUNT_PROPERTY, Integer.parseInt(CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); List columns = new ArrayList<>(fieldCount + 1); ColumnSchema keyColumn = new ColumnSchema.ColumnSchemaBuilder(KEY, STRING) .key(true) .desiredBlockSize(blockSize) .build(); columns.add(keyColumn); COLUMN_NAMES.add(KEY); for (int i = 0; i < fieldCount; i++) { String name = "field" + i; COLUMN_NAMES.add(name); columns.add(new ColumnSchema.ColumnSchemaBuilder(name, STRING) .desiredBlockSize(blockSize) .build()); } schema = new Schema(columns); CreateTableOptions builder = new CreateTableOptions(); builder.setRangePartitionColumns(new ArrayList()); List hashPartitionColumns = new ArrayList<>(); hashPartitionColumns.add(KEY); builder.addHashPartitions(hashPartitionColumns, numTablets); builder.setNumReplicas(numReplicas); try { client.createTable(tableName, schema, builder); } catch (Exception e) { if (!e.getMessage().contains("already exists")) { throw new DBException("Couldn't 
create the table", e); } } } private static int getIntFromProp(Properties prop, String propName, int defaultValue) throws DBException { String intStr = prop.getProperty(propName); if (intStr == null) { return defaultValue; } else { try { return Integer.valueOf(intStr); } catch (NumberFormatException ex) { throw new DBException("Provided number for " + propName + " isn't a valid integer"); } } } @Override public void cleanup() throws DBException { try { this.session.close(); } catch (Exception e) { throw new DBException("Couldn't cleanup the session", e); } } @Override - public Status read(String table, - String key, - Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, + Map result) { Vector> results = new Vector<>(); final Status status = scan(table, key, 1, fields, results); if (!status.equals(Status.OK)) { return status; } if (results.size() != 1) { return Status.NOT_FOUND; } result.putAll(results.firstElement()); return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { KuduScanner.KuduScannerBuilder scannerBuilder = client.newScannerBuilder(kuduTable); List querySchema; if (fields == null) { querySchema = COLUMN_NAMES; // No need to set the projected columns with the whole schema. } else { querySchema = new ArrayList<>(fields); scannerBuilder.setProjectedColumnNames(querySchema); } ColumnSchema column = schema.getColumnByIndex(0); KuduPredicate.ComparisonOp predicateOp = recordcount == 1 ? EQUAL : GREATER_EQUAL; KuduPredicate predicate = KuduPredicate.newComparisonPredicate(column, predicateOp, startkey); scannerBuilder.addPredicate(predicate); scannerBuilder.limit(recordcount); // currently noop KuduScanner scanner = scannerBuilder.build(); while (scanner.hasMoreRows()) { RowResultIterator data = scanner.nextRows(); addAllRowsToResult(data, recordcount, querySchema, result); if (recordcount == result.size()) { break; } } RowResultIterator closer = scanner.close(); addAllRowsToResult(closer, recordcount, querySchema, result); } catch (TimeoutException te) { LOG.info("Waited too long for a scan operation with start key={}", startkey); return TIMEOUT; } catch (Exception e) { LOG.warn("Unexpected exception", e); return Status.ERROR; } return Status.OK; } private void addAllRowsToResult(RowResultIterator it, int recordcount, List querySchema, Vector> result) throws Exception { RowResult row; HashMap rowResult = new HashMap<>(querySchema.size()); if (it == null) { return; } while (it.hasNext()) { if (result.size() == recordcount) { return; } row = it.next(); int colIdx = 0; for (String col : querySchema) { rowResult.put(col, new StringByteIterator(row.getString(colIdx))); colIdx++; } result.add(rowResult); } } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Update update = this.kuduTable.newUpdate(); PartialRow row = update.getRow(); row.addString(KEY, key); for (int i = 1; i < schema.getColumnCount(); i++) { String columnName = schema.getColumnByIndex(i).getName(); if (values.containsKey(columnName)) { String value = values.get(columnName).toString(); row.addString(columnName, value); } } apply(update); return Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { Insert insert = this.kuduTable.newInsert(); PartialRow row = insert.getRow(); row.addString(KEY, key); for (int i = 1; i < 
schema.getColumnCount(); i++) { row.addString(i, values.get(schema.getColumnByIndex(i).getName()).toString()); } apply(insert); return Status.OK; } @Override public Status delete(String table, String key) { Delete delete = this.kuduTable.newDelete(); PartialRow row = delete.getRow(); row.addString(KEY, key); apply(delete); return Status.OK; } private void apply(Operation op) { try { OperationResponse response = session.apply(op); if (response != null && response.hasRowError()) { LOG.info("Write operation failed: {}", response.getRowError()); } } catch (KuduException ex) { LOG.warn("Write operation failed", ex); } } } diff --git a/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java b/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java index 012a96cb..0d50c41d 100644 --- a/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java +++ b/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java @@ -1,219 +1,219 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Properties; import java.util.Set; import java.util.Vector; import org.apache.thrift.TException; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.transport.TFramedTransport; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; import com.yahoo.mapkeeper.BinaryResponse; import com.yahoo.mapkeeper.MapKeeper; import com.yahoo.mapkeeper.Record; import com.yahoo.mapkeeper.RecordListResponse; import com.yahoo.mapkeeper.ResponseCode; import com.yahoo.mapkeeper.ScanOrder; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.StringByteIterator; import com.yahoo.ycsb.workloads.CoreWorkload; public class MapKeeperClient extends DB { private static final String HOST = "mapkeeper.host"; private static final String HOST_DEFAULT = "localhost"; private static final String PORT = "mapkeeper.port"; private static final String PORT_DEFAULT = "9090"; MapKeeper.Client c; boolean writeallfields; static boolean initteddb = false; private synchronized static void initDB(Properties p, MapKeeper.Client c) throws TException { if(!initteddb) { initteddb = true; c.addMap(p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT)); } } public void init() { String host = getProperties().getProperty(HOST, HOST_DEFAULT); int port = Integer.parseInt(getProperties().getProperty(PORT, PORT_DEFAULT)); TTransport tr = new TFramedTransport(new TSocket(host, port)); TProtocol proto = new TBinaryProtocol(tr); c = new MapKeeper.Client(proto); try { tr.open(); initDB(getProperties(), c); } catch(TException e) { throw new RuntimeException(e); } writeallfields = Boolean.parseBoolean(getProperties().getProperty(CoreWorkload.WRITE_ALL_FIELDS_PROPERTY, CoreWorkload.WRITE_ALL_FIELDS_PROPERTY_DEFAULT)); } 
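// The encode()/decode() pair below flattens a YCSB field map into the single
// opaque value that MapKeeper stores per key. For example, {field0=a,
// field1=b} becomes "field0\ta\tfield1\tb" followed by a 0 byte in place of
// the final tab; see the delimiter caveat noted inside encode().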
ByteBuffer encode(HashMap values) { int len = 0; for(Map.Entry entry : values.entrySet()) { len += (entry.getKey().length() + 1 + entry.getValue().bytesLeft() + 1); } byte[] array = new byte[len]; int i = 0; for(Map.Entry entry : values.entrySet()) { for(int j = 0; j < entry.getKey().length(); j++) { array[i] = (byte)entry.getKey().charAt(j); i++; } array[i] = '\t'; // XXX would like to use sane delimiter (null, 254, 255, ...) but java makes this nearly impossible i++; ByteIterator v = entry.getValue(); i = v.nextBuf(array, i); array[i] = '\t'; i++; } array[array.length-1] = 0; ByteBuffer buf = ByteBuffer.wrap(array); buf.rewind(); return buf; } void decode(Set fields, String tups, HashMap tup) { String[] tok = tups.split("\\t"); if(tok.length == 0) { throw new IllegalStateException("split returned empty array!"); } for(int i = 0; i < tok.length; i+=2) { if(fields == null || fields.contains(tok[i])) { if(tok.length < i+2) { throw new IllegalStateException("Couldn't parse tuple <" + tups + "> at index " + i); } if(tok[i] == null || tok[i+1] == null) throw new NullPointerException("Key is " + tok[i] + " val is + " + tok[i+1]); tup.put(tok[i], new StringByteIterator(tok[i+1])); } } if(tok.length == 0) { System.err.println("Empty tuple: " + tups); } } int ycsbThriftRet(BinaryResponse succ, ResponseCode zero, ResponseCode one) { return ycsbThriftRet(succ.responseCode, zero, one); } int ycsbThriftRet(ResponseCode rc, ResponseCode zero, ResponseCode one) { return rc == zero ? 0 : rc == one ? 1 : 2; } ByteBuffer bufStr(String str) { ByteBuffer buf = ByteBuffer.wrap(str.getBytes()); return buf; } String strResponse(BinaryResponse buf) { return new String(buf.value.array()); } @Override public int read(String table, String key, Set fields, - HashMap result) { + Map result) { try { ByteBuffer buf = bufStr(key); BinaryResponse succ = c.get(table, buf); int ret = ycsbThriftRet( succ, ResponseCode.RecordExists, ResponseCode.RecordNotFound); if(ret == 0) { decode(fields, strResponse(succ), result); } return ret; } catch(TException e) { e.printStackTrace(); return 2; } } @Override public int scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { //XXX what to pass in for nulls / zeros? RecordListResponse res = c.scan(table, ScanOrder.Ascending, bufStr(startkey), true, null, false, recordcount, 0); int ret = ycsbThriftRet(res.responseCode, ResponseCode.Success, ResponseCode.ScanEnded); if(ret == 0) { for(Record r : res.records) { HashMap tuple = new HashMap(); // Note: r.getKey() and r.getValue() call special helper methods that trim the buffer // to an appropriate length, and memcpy it to a byte[]. Trying to manipulate the ByteBuffer // directly leads to trouble. 
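// The row's own key is surfaced as a synthetic "key" field alongside the
// decoded columns, whether or not it appears in the requested field set.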
tuple.put("key", new StringByteIterator(new String(r.getKey()))); decode(fields, new String(r.getValue())/*strBuf(r.bufferForValue())*/, tuple); result.add(tuple); } } return ret; } catch(TException e) { e.printStackTrace(); return 2; } } @Override public int update(String table, String key, - HashMap values) { + Map values) { try { if(!writeallfields) { HashMap oldval = new HashMap(); read(table, key, null, oldval); for(Map.Entry entry : values.entrySet()) { oldval.put(entry.getKey(), entry.getValue()); } values = oldval; } ResponseCode succ = c.update(table, bufStr(key), encode(values)); return ycsbThriftRet(succ, ResponseCode.RecordExists, ResponseCode.RecordNotFound); } catch(TException e) { e.printStackTrace(); return 2; } } @Override public int insert(String table, String key, - HashMap values) { + Map values) { try { int ret = ycsbThriftRet(c.insert(table, bufStr(key), encode(values)), ResponseCode.Success, ResponseCode.RecordExists); return ret; } catch(TException e) { e.printStackTrace(); return 2; } } @Override public int delete(String table, String key) { try { return ycsbThriftRet(c.remove(table, bufStr(key)), ResponseCode.Success, ResponseCode.RecordExists); } catch(TException e) { e.printStackTrace(); return 2; } } } diff --git a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java index 85f1f5dc..b8f69384 100644 --- a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java +++ b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java @@ -1,303 +1,303 @@ /** * Copyright (c) 2014-2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import java.io.IOException; import java.io.StringWriter; import java.io.Writer; import java.net.InetSocketAddress; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Vector; import net.spy.memcached.ConnectionFactoryBuilder; import net.spy.memcached.FailureMode; // We also use `net.spy.memcached.MemcachedClient`; it is not imported // explicitly and referred to with its full path to avoid conflicts with the // class of the same name in this file. import net.spy.memcached.internal.GetFuture; import net.spy.memcached.internal.OperationFuture; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonGenerator; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.node.ObjectNode; import org.apache.log4j.Logger; import static java.util.concurrent.TimeUnit.MILLISECONDS; /** * Concrete Memcached client implementation. 
*/ public class MemcachedClient extends DB { private final Logger logger = Logger.getLogger(getClass()); protected static final ObjectMapper MAPPER = new ObjectMapper(); private boolean checkOperationStatus; private long shutdownTimeoutMillis; private int objectExpirationTime; public static final String HOSTS_PROPERTY = "memcached.hosts"; public static final int DEFAULT_PORT = 11211; private static final String TEMPORARY_FAILURE_MSG = "Temporary failure"; private static final String CANCELLED_MSG = "cancelled"; public static final String SHUTDOWN_TIMEOUT_MILLIS_PROPERTY = "memcached.shutdownTimeoutMillis"; public static final String DEFAULT_SHUTDOWN_TIMEOUT_MILLIS = "30000"; public static final String OBJECT_EXPIRATION_TIME_PROPERTY = "memcached.objectExpirationTime"; public static final String DEFAULT_OBJECT_EXPIRATION_TIME = String.valueOf(Integer.MAX_VALUE); public static final String CHECK_OPERATION_STATUS_PROPERTY = "memcached.checkOperationStatus"; public static final String CHECK_OPERATION_STATUS_DEFAULT = "true"; public static final String READ_BUFFER_SIZE_PROPERTY = "memcached.readBufferSize"; public static final String DEFAULT_READ_BUFFER_SIZE = "3000000"; public static final String OP_TIMEOUT_PROPERTY = "memcached.opTimeoutMillis"; public static final String DEFAULT_OP_TIMEOUT = "60000"; public static final String FAILURE_MODE_PROPERTY = "memcached.failureMode"; public static final FailureMode FAILURE_MODE_PROPERTY_DEFAULT = FailureMode.Redistribute; public static final String PROTOCOL_PROPERTY = "memcached.protocol"; public static final ConnectionFactoryBuilder.Protocol DEFAULT_PROTOCOL = ConnectionFactoryBuilder.Protocol.TEXT; /** * The MemcachedClient implementation that will be used to communicate * with the memcached server. */ private net.spy.memcached.MemcachedClient client; /** * @returns Underlying Memcached protocol client, implemented by * SpyMemcached. */ protected net.spy.memcached.MemcachedClient memcachedClient() { return client; } @Override public void init() throws DBException { try { client = createMemcachedClient(); checkOperationStatus = Boolean.parseBoolean( getProperties().getProperty(CHECK_OPERATION_STATUS_PROPERTY, CHECK_OPERATION_STATUS_DEFAULT)); objectExpirationTime = Integer.parseInt( getProperties().getProperty(OBJECT_EXPIRATION_TIME_PROPERTY, DEFAULT_OBJECT_EXPIRATION_TIME)); shutdownTimeoutMillis = Integer.parseInt( getProperties().getProperty(SHUTDOWN_TIMEOUT_MILLIS_PROPERTY, DEFAULT_SHUTDOWN_TIMEOUT_MILLIS)); } catch (Exception e) { throw new DBException(e); } } protected net.spy.memcached.MemcachedClient createMemcachedClient() throws Exception { ConnectionFactoryBuilder connectionFactoryBuilder = new ConnectionFactoryBuilder(); connectionFactoryBuilder.setReadBufferSize(Integer.parseInt( getProperties().getProperty(READ_BUFFER_SIZE_PROPERTY, DEFAULT_READ_BUFFER_SIZE))); connectionFactoryBuilder.setOpTimeout(Integer.parseInt( getProperties().getProperty(OP_TIMEOUT_PROPERTY, DEFAULT_OP_TIMEOUT))); String protocolString = getProperties().getProperty(PROTOCOL_PROPERTY); connectionFactoryBuilder.setProtocol( protocolString == null ? DEFAULT_PROTOCOL : ConnectionFactoryBuilder.Protocol.valueOf(protocolString.toUpperCase())); String failureString = getProperties().getProperty(FAILURE_MODE_PROPERTY); connectionFactoryBuilder.setFailureMode( failureString == null ? 
        FAILURE_MODE_PROPERTY_DEFAULT : FailureMode.valueOf(failureString));

    // Note: this only works with IPv4 addresses due to its assumption of
    // ":" being the separator of hostname/IP and port; this is not the case
    // when dealing with IPv6 addresses.
    //
    // TODO(mbrukman): fix this.
    List addresses = new ArrayList();
    String[] hosts = getProperties().getProperty(HOSTS_PROPERTY).split(",");
    for (String address : hosts) {
      int colon = address.indexOf(":");
      int port = DEFAULT_PORT;
      String host = address;
      if (colon != -1) {
        port = Integer.parseInt(address.substring(colon + 1));
        host = address.substring(0, colon);
      }
      addresses.add(new InetSocketAddress(host, port));
    }
    return new net.spy.memcached.MemcachedClient(
        connectionFactoryBuilder.build(), addresses);
  }

  @Override
  public Status read(
      String table, String key, Set fields,
-     HashMap result) {
+     Map result) {
    key = createQualifiedKey(table, key);
    try {
      GetFuture future = memcachedClient().asyncGet(key);
      Object document = future.get();
      if (document != null) {
        fromJson((String) document, fields, result);
      }
      return Status.OK;
    } catch (Exception e) {
      logger.error("Error encountered for key: " + key, e);
      return Status.ERROR;
    }
  }

  @Override
  public Status scan(
      String table, String startkey, int recordcount,
      Set fields, Vector> result) {
    return Status.NOT_IMPLEMENTED;
  }

  @Override
  public Status update(
-     String table, String key, HashMap values) {
+     String table, String key, Map values) {
    key = createQualifiedKey(table, key);
    try {
      OperationFuture future =
          memcachedClient().replace(key, objectExpirationTime, toJson(values));
      return getReturnCode(future);
    } catch (Exception e) {
      logger.error("Error updating value with key: " + key, e);
      return Status.ERROR;
    }
  }

  @Override
  public Status insert(
-     String table, String key, HashMap values) {
+     String table, String key, Map values) {
    key = createQualifiedKey(table, key);
    try {
      OperationFuture future =
          memcachedClient().add(key, objectExpirationTime, toJson(values));
      return getReturnCode(future);
    } catch (Exception e) {
      logger.error("Error inserting value", e);
      return Status.ERROR;
    }
  }

  @Override
  public Status delete(String table, String key) {
    key = createQualifiedKey(table, key);
    try {
      OperationFuture future = memcachedClient().delete(key);
      return getReturnCode(future);
    } catch (Exception e) {
      logger.error("Error deleting value", e);
      return Status.ERROR;
    }
  }

  protected Status getReturnCode(OperationFuture future) {
    if (!checkOperationStatus) {
      return Status.OK;
    }
    if (future.getStatus().isSuccess()) {
      return Status.OK;
    } else if (TEMPORARY_FAILURE_MSG.equals(future.getStatus().getMessage())) {
      return new Status("TEMPORARY_FAILURE", TEMPORARY_FAILURE_MSG);
    } else if (CANCELLED_MSG.equals(future.getStatus().getMessage())) {
      return new Status("CANCELLED_MSG", CANCELLED_MSG);
    }
    return new Status("ERROR", future.getStatus().getMessage());
  }

  @Override
  public void cleanup() throws DBException {
    if (client != null) {
      memcachedClient().shutdown(shutdownTimeoutMillis, MILLISECONDS);
    }
  }

  protected static String createQualifiedKey(String table, String key) {
    return MessageFormat.format("{0}-{1}", table, key);
  }

  protected static void fromJson(
      String value, Set fields,
      Map result) throws IOException {
    JsonNode json = MAPPER.readTree(value);
    boolean checkFields = fields != null && !fields.isEmpty();
    for (Iterator> jsonFields = json.getFields();
         jsonFields.hasNext();
         /* increment in loop body */) {
      Map.Entry jsonField = jsonFields.next();
      String name = jsonField.getKey();
      // Skip fields that were not requested.
      if (checkFields && !fields.contains(name)) {
        continue;
      }
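      // Editor's note (illustrative): with fields = {"field0"}, a stored
      // document {"field0":"a","field1":"b"} yields only field0 in the result
      // map; a null or empty field set returns every field.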
JsonNode jsonValue = jsonField.getValue(); if (jsonValue != null && !jsonValue.isNull()) { result.put(name, new StringByteIterator(jsonValue.asText())); } } } protected static String toJson(Map values) throws IOException { ObjectNode node = MAPPER.createObjectNode(); - HashMap stringMap = StringByteIterator.getStringMap(values); + Map stringMap = StringByteIterator.getStringMap(values); for (Map.Entry pair : stringMap.entrySet()) { node.put(pair.getKey(), pair.getValue()); } JsonFactory jsonFactory = new JsonFactory(); Writer writer = new StringWriter(); JsonGenerator jsonGenerator = jsonFactory.createJsonGenerator(writer); MAPPER.writeTree(jsonGenerator, node); return writer.toString(); } } diff --git a/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java b/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java index a977fbe4..3bbb5fbd 100644 --- a/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java +++ b/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java @@ -1,550 +1,550 @@ /* * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import static com.allanbank.mongodb.builder.QueryBuilder.where; import com.allanbank.mongodb.Durability; import com.allanbank.mongodb.LockType; import com.allanbank.mongodb.MongoClient; import com.allanbank.mongodb.MongoClientConfiguration; import com.allanbank.mongodb.MongoCollection; import com.allanbank.mongodb.MongoDatabase; import com.allanbank.mongodb.MongoDbUri; import com.allanbank.mongodb.MongoFactory; import com.allanbank.mongodb.MongoIterator; import com.allanbank.mongodb.ReadPreference; import com.allanbank.mongodb.bson.Document; import com.allanbank.mongodb.bson.Element; import com.allanbank.mongodb.bson.ElementType; import com.allanbank.mongodb.bson.builder.BuilderFactory; import com.allanbank.mongodb.bson.builder.DocumentBuilder; import com.allanbank.mongodb.bson.element.BinaryElement; import com.allanbank.mongodb.builder.BatchedWrite; import com.allanbank.mongodb.builder.BatchedWriteMode; import com.allanbank.mongodb.builder.Find; import com.allanbank.mongodb.builder.Sort; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; /** * MongoDB asynchronous client for YCSB framework using the Asynchronous Java * Driver *
<p>
* See the README.md for configuration information. *
</p>
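 * <p>
 * Editor's note (illustrative): bulk loading is controlled by the
 * mongodb.batchsize property (default 1), and inserts can be turned into
 * upserts with mongodb.upsert (default false); see init() below.
 * </p>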
 *
 * @author rjm
 * @see Asynchronous Java Driver
 */
public class AsyncMongoDbClient extends DB {

  /** Used to include a field in a response. */
  protected static final int INCLUDE = 1;

  /** The database to use. */
  private static String databaseName;

  /** Thread local document builder. */
  private static final ThreadLocal DOCUMENT_BUILDER =
      new ThreadLocal() {
        @Override
        protected DocumentBuilder initialValue() {
          return BuilderFactory.start();
        }
      };

  /** Count of the threads that have initialized the client, so the last
   * cleanup() can tear it down. */
  private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);

  /** The connection to MongoDB. */
  private static MongoClient mongoClient;

  /** The write concern for the requests. */
  private static Durability writeConcern;

  /** Which servers to use for reads. */
  private static ReadPreference readPreference;

  /** The handle to the MongoDB database. */
  private MongoDatabase database;

  /** The batch size to use for inserts. */
  private static int batchSize;

  /** If true then use updates with the upsert option for inserts. */
  private static boolean useUpsert;

  /** The bulk inserts pending for the thread. */
  private final BatchedWrite.Builder batchedWrite = BatchedWrite.builder()
      .mode(BatchedWriteMode.REORDERED);

  /** The number of writes in the batchedWrite. */
  private int batchedWriteCount = 0;

  /**
   * Cleanup any state for this DB. Called once per DB instance; there is one DB
   * instance per client thread.
   */
  @Override
  public final void cleanup() throws DBException {
    if (INIT_COUNT.decrementAndGet() == 0) {
      try {
        mongoClient.close();
      } catch (final Exception e1) {
        System.err.println("Could not close MongoDB connection pool: "
            + e1.toString());
        e1.printStackTrace();
        return;
      } finally {
        mongoClient = null;
        database = null;
      }
    }
  }

  /**
   * Delete a record from the database.
   *
   * @param table
   *          The name of the table
   * @param key
   *          The record key of the record to delete.
   * @return Zero on success, a non-zero error code on error. See this class's
   *         description for a discussion of error codes.
   */
  @Override
  public final Status delete(final String table, final String key) {
    try {
      final MongoCollection collection = database.getCollection(table);
      final Document q = BuilderFactory.start().add("_id", key).build();
      final long res = collection.delete(q, writeConcern);
      if (res == 0) {
        System.err.println("Nothing deleted for key " + key);
        return Status.NOT_FOUND;
      }
      return Status.OK;
    } catch (final Exception e) {
      System.err.println(e.toString());
      return Status.ERROR;
    }
  }

  /**
   * Initialize any state for this DB. Called once per DB instance; there is one
   * DB instance per client thread.
   */
  @Override
  public final void init() throws DBException {
    final int count = INIT_COUNT.incrementAndGet();

    synchronized (AsyncMongoDbClient.class) {
      final Properties props = getProperties();

      if (mongoClient != null) {
        database = mongoClient.getDatabase(databaseName);

        // If there are more threads (count) than connections then the
        // low latency spin lock is not really needed as we will keep
        // the connections occupied.
        if (count > mongoClient.getConfig().getMaxConnectionCount()) {
          mongoClient.getConfig().setLockType(LockType.MUTEX);
        }

        return;
      }

      // Set insert batchsize, default 1 - to be YCSB-original equivalent.
      batchSize = Integer.parseInt(props.getProperty("mongodb.batchsize", "1"));

      // Set if inserts are done as upserts. Defaults to false.
      useUpsert = Boolean.parseBoolean(
          props.getProperty("mongodb.upsert", "false"));

      // Just use the standard connection format URL
      // http://docs.mongodb.org/manual/reference/connection-string/
      // to configure the client.
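      // Illustrative examples (editor's addition) of accepted values:
      //   mongodb://localhost:27017/ycsb?w=1
      //   mongodb://host1:27017,host2:27017/ycsb?w=1
      // The database segment ("ycsb" above) selects databaseName below, and a
      // locktype= option in the URL suppresses the LOW_LATENCY_SPIN default.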
String url = props .getProperty("mongodb.url", "mongodb://localhost:27017/ycsb?w=1"); if (!url.startsWith("mongodb://")) { System.err.println("ERROR: Invalid URL: '" + url + "'. Must be of the form " + "'mongodb://:,:/database?" + "options'. See " + "http://docs.mongodb.org/manual/reference/connection-string/."); System.exit(1); } MongoDbUri uri = new MongoDbUri(url); try { databaseName = uri.getDatabase(); if ((databaseName == null) || databaseName.isEmpty()) { // Default database is "ycsb" if database is not // specified in URL databaseName = "ycsb"; } mongoClient = MongoFactory.createClient(uri); MongoClientConfiguration config = mongoClient.getConfig(); if (!url.toLowerCase().contains("locktype=")) { config.setLockType(LockType.LOW_LATENCY_SPIN); // assumed... } readPreference = config.getDefaultReadPreference(); writeConcern = config.getDefaultDurability(); database = mongoClient.getDatabase(databaseName); System.out.println("mongo connection created with " + url); } catch (final Exception e1) { System.err .println("Could not initialize MongoDB connection pool for Loader: " + e1.toString()); e1.printStackTrace(); return; } } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. */ @Override public final Status insert(final String table, final String key, - final HashMap values) { + final Map values) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder toInsert = DOCUMENT_BUILDER.get().reset().add("_id", key); final Document query = toInsert.build(); for (final Map.Entry entry : values.entrySet()) { toInsert.add(entry.getKey(), entry.getValue().toArray()); } // Do an upsert. if (batchSize <= 1) { long result; if (useUpsert) { result = collection.update(query, toInsert, /* multi= */false, /* upsert= */true, writeConcern); } else { // Return is not stable pre-SERVER-4381. No exception is success. collection.insert(writeConcern, toInsert); result = 1; } return result == 1 ? Status.OK : Status.NOT_FOUND; } // Use a bulk insert. try { if (useUpsert) { batchedWrite.update(query, toInsert, /* multi= */false, /* upsert= */true); } else { batchedWrite.insert(toInsert); } batchedWriteCount += 1; if (batchedWriteCount < batchSize) { return Status.BATCHED_OK; } long count = collection.write(batchedWrite); if (count == batchedWriteCount) { batchedWrite.reset().mode(BatchedWriteMode.REORDERED); batchedWriteCount = 0; return Status.OK; } System.err.println("Number of inserted documents doesn't match the " + "number sent, " + count + " inserted, sent " + batchedWriteCount); batchedWrite.reset().mode(BatchedWriteMode.REORDERED); batchedWriteCount = 0; return Status.ERROR; } catch (Exception e) { System.err.println("Exception while trying bulk insert with " + batchedWriteCount); e.printStackTrace(); return Status.ERROR; } } catch (final Exception e) { e.printStackTrace(); return Status.ERROR; } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. 
* @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public final Status read(final String table, final String key, - final Set fields, final HashMap result) { + final Set fields, final Map result) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder query = DOCUMENT_BUILDER.get().reset().add("_id", key); Document queryResult = null; if (fields != null) { final DocumentBuilder fieldsToReturn = BuilderFactory.start(); final Iterator iter = fields.iterator(); while (iter.hasNext()) { fieldsToReturn.add(iter.next(), 1); } final Find.Builder fb = new Find.Builder(query); fb.projection(fieldsToReturn); fb.setLimit(1); fb.setBatchSize(1); fb.readPreference(readPreference); final MongoIterator ci = collection.find(fb.build()); if (ci.hasNext()) { queryResult = ci.next(); ci.close(); } } else { queryResult = collection.findOne(query); } if (queryResult != null) { fillMap(result, queryResult); } return queryResult != null ? Status.OK : Status.NOT_FOUND; } catch (final Exception e) { System.err.println(e.toString()); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. */ @Override public final Status scan(final String table, final String startkey, final int recordcount, final Set fields, final Vector> result) { try { final MongoCollection collection = database.getCollection(table); final Find.Builder find = Find.builder().query(where("_id").greaterThanOrEqualTo(startkey)) .limit(recordcount).batchSize(recordcount).sort(Sort.asc("_id")) .readPreference(readPreference); if (fields != null) { final DocumentBuilder fieldsDoc = BuilderFactory.start(); for (final String field : fields) { fieldsDoc.add(field, INCLUDE); } find.projection(fieldsDoc); } result.ensureCapacity(recordcount); final MongoIterator cursor = collection.find(find); if (!cursor.hasNext()) { System.err.println("Nothing found in scan for key " + startkey); return Status.NOT_FOUND; } while (cursor.hasNext()) { // toMap() returns a Map but result.add() expects a // Map. Hence, the suppress warnings. final Document doc = cursor.next(); final HashMap docAsMap = new HashMap(); fillMap(docAsMap, doc); result.add(docAsMap); } return Status.OK; } catch (final Exception e) { System.err.println(e.toString()); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. 
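 * <p>
 * Editor's sketch (illustrative): read("usertable", "user1", {"field0"},
 * result) issues the query {_id: "user1"} with the projection {field0: 1}
 * and a limit and batch size of 1, then copies any binary elements of the
 * matching document into the result map.
 * </p>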
*/ @Override public final Status update(final String table, final String key, - final HashMap values) { + final Map values) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder query = BuilderFactory.start().add("_id", key); final DocumentBuilder update = BuilderFactory.start(); final DocumentBuilder fieldsToSet = update.push("$set"); for (final Map.Entry entry : values.entrySet()) { fieldsToSet.add(entry.getKey(), entry.getValue().toArray()); } final long res = collection.update(query, update, false, false, writeConcern); return writeConcern == Durability.NONE || res == 1 ? Status.OK : Status.NOT_FOUND; } catch (final Exception e) { System.err.println(e.toString()); return Status.ERROR; } } /** * Fills the map with the ByteIterators from the document. * * @param result * The map to fill. * @param queryResult * The document to fill from. */ - protected final void fillMap(final HashMap result, + protected final void fillMap(final Map result, final Document queryResult) { for (final Element be : queryResult) { if (be.getType() == ElementType.BINARY) { result.put(be.getName(), new BinaryByteArrayIterator((BinaryElement) be)); } } } /** * BinaryByteArrayIterator provides an adapter from a {@link BinaryElement} to * a {@link ByteIterator}. */ private static final class BinaryByteArrayIterator extends ByteIterator { /** The binary data. */ private final BinaryElement binaryElement; /** The current offset into the binary element. */ private int offset; /** * Creates a new BinaryByteArrayIterator. * * @param element * The {@link BinaryElement} to iterate over. */ public BinaryByteArrayIterator(final BinaryElement element) { this.binaryElement = element; this.offset = 0; } /** * {@inheritDoc} *
<p>
* Overridden to return the number of bytes remaining in the iterator. *
</p>
*/ @Override public long bytesLeft() { return Math.max(0, binaryElement.length() - offset); } /** * {@inheritDoc} *
<p>
* Overridden to return true if there is more data in the * {@link BinaryElement}. *
</p>
*/ @Override public boolean hasNext() { return (offset < binaryElement.length()); } /** * {@inheritDoc} *
<p>
* Overridden to return the next value and advance the iterator. *
</p>
*/ @Override public byte nextByte() { final byte value = binaryElement.get(offset); offset += 1; return value; } } } diff --git a/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java b/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java index 2b7cb114..5704b413 100644 --- a/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java +++ b/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java @@ -1,470 +1,470 @@ /** * Copyright (c) 2012 - 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /* * MongoDB client binding for YCSB. * * Submitted by Yen Pai on 5/11/2010. * * https://gist.github.com/000a66b8db2caf42467b#file_mongo_database.java */ package com.yahoo.ycsb.db; import com.mongodb.MongoClient; import com.mongodb.MongoClientURI; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; import com.mongodb.client.FindIterable; import com.mongodb.client.MongoCollection; import com.mongodb.client.MongoCursor; import com.mongodb.client.MongoDatabase; import com.mongodb.client.model.InsertManyOptions; import com.mongodb.client.model.UpdateOneModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.UpdateResult; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import org.bson.Document; import org.bson.types.Binary; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.concurrent.atomic.AtomicInteger; /** * MongoDB binding for YCSB framework using the MongoDB Inc. driver *
<p>
* See the README.md for configuration information. *
</p>
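 * <p>
 * Editor's note (illustrative): the binding is configured entirely through
 * the mongodb.url connection string; inserts can be batched with the
 * batchsize property (default 1) and turned into upserts with mongodb.upsert.
 * </p>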
* * @author ypai * @see MongoDB Inc. * driver */ public class MongoDbClient extends DB { /** Used to include a field in a response. */ private static final Integer INCLUDE = Integer.valueOf(1); /** The options to use for inserting many documents. */ private static final InsertManyOptions INSERT_UNORDERED = new InsertManyOptions().ordered(false); /** The options to use for inserting a single document. */ private static final UpdateOptions UPDATE_WITH_UPSERT = new UpdateOptions() .upsert(true); /** * The database name to access. */ private static String databaseName; /** The database name to access. */ private static MongoDatabase database; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** A singleton Mongo instance. */ private static MongoClient mongoClient; /** The default read preference for the test. */ private static ReadPreference readPreference; /** The default write concern for the test. */ private static WriteConcern writeConcern; /** The batch size to use for inserts. */ private static int batchSize; /** If true then use updates with the upsert option for inserts. */ private static boolean useUpsert; /** The bulk inserts pending for the thread. */ private final List bulkInserts = new ArrayList(); /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { try { mongoClient.close(); } catch (Exception e1) { System.err.println("Could not close MongoDB connection pool: " + e1.toString()); e1.printStackTrace(); return; } finally { database = null; mongoClient = null; } } } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { MongoCollection collection = database.getCollection(table); Document query = new Document("_id", key); DeleteResult result = collection.withWriteConcern(writeConcern).deleteOne(query); if (result.wasAcknowledged() && result.getDeletedCount() == 0) { System.err.println("Nothing deleted for key " + key); return Status.NOT_FOUND; } return Status.OK; } catch (Exception e) { System.err.println(e.toString()); return Status.ERROR; } } /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INCLUDE) { if (mongoClient != null) { return; } Properties props = getProperties(); // Set insert batchsize, default 1 - to be YCSB-original equivalent batchSize = Integer.parseInt(props.getProperty("batchsize", "1")); // Set is inserts are done as upserts. Defaults to false. useUpsert = Boolean.parseBoolean( props.getProperty("mongodb.upsert", "false")); // Just use the standard connection format URL // http://docs.mongodb.org/manual/reference/connection-string/ // to configure the client. String url = props.getProperty("mongodb.url", null); boolean defaultedUrl = false; if (url == null) { defaultedUrl = true; url = "mongodb://localhost:27017/ycsb?w=1"; } url = OptionsSupport.updateUrl(url, props); if (!url.startsWith("mongodb://")) { System.err.println("ERROR: Invalid URL: '" + url + "'. 
Must be of the form " + "'mongodb://:,:/database?options'. " + "http://docs.mongodb.org/manual/reference/connection-string/"); System.exit(1); } try { MongoClientURI uri = new MongoClientURI(url); String uriDb = uri.getDatabase(); if (!defaultedUrl && (uriDb != null) && !uriDb.isEmpty() && !"admin".equals(uriDb)) { databaseName = uriDb; } else { // If no database is specified in URI, use "ycsb" databaseName = "ycsb"; } readPreference = uri.getOptions().getReadPreference(); writeConcern = uri.getOptions().getWriteConcern(); mongoClient = new MongoClient(uri); database = mongoClient.getDatabase(databaseName) .withReadPreference(readPreference) .withWriteConcern(writeConcern); System.out.println("mongo client connection created with " + url); } catch (Exception e1) { System.err .println("Could not initialize MongoDB connection pool for Loader: " + e1.toString()); e1.printStackTrace(); return; } } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { try { MongoCollection collection = database.getCollection(table); Document toInsert = new Document("_id", key); for (Map.Entry entry : values.entrySet()) { toInsert.put(entry.getKey(), entry.getValue().toArray()); } if (batchSize == 1) { if (useUpsert) { // this is effectively an insert, but using an upsert instead due // to current inability of the framework to clean up after itself // between test runs. collection.replaceOne(new Document("_id", toInsert.get("_id")), toInsert, UPDATE_WITH_UPSERT); } else { collection.insertOne(toInsert); } } else { bulkInserts.add(toInsert); if (bulkInserts.size() == batchSize) { if (useUpsert) { List> updates = new ArrayList>(bulkInserts.size()); for (Document doc : bulkInserts) { updates.add(new UpdateOneModel( new Document("_id", doc.get("_id")), doc, UPDATE_WITH_UPSERT)); } collection.bulkWrite(updates); } else { collection.insertMany(bulkInserts, INSERT_UNORDERED); } bulkInserts.clear(); } else { return Status.BATCHED_OK; } } return Status.OK; } catch (Exception e) { System.err.println("Exception while trying bulk insert with " + bulkInserts.size()); e.printStackTrace(); return Status.ERROR; } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". 
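 * <p>
 * Editor's sketch (illustrative): with fields = {"field0"} this method runs
 * collection.find({_id: key}) with projection {field0: 1}, takes the first
 * match, and copies any Binary values into the result map.
 * </p>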
*/ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { MongoCollection collection = database.getCollection(table); Document query = new Document("_id", key); FindIterable findIterable = collection.find(query); if (fields != null) { Document projection = new Document(); for (String field : fields) { projection.put(field, INCLUDE); } findIterable.projection(projection); } Document queryResult = findIterable.first(); if (queryResult != null) { fillMap(result, queryResult); } return queryResult != null ? Status.OK : Status.NOT_FOUND; } catch (Exception e) { System.err.println(e.toString()); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the {@link DB} * class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { MongoCursor cursor = null; try { MongoCollection collection = database.getCollection(table); Document scanRange = new Document("$gte", startkey); Document query = new Document("_id", scanRange); Document sort = new Document("_id", INCLUDE); FindIterable findIterable = collection.find(query).sort(sort).limit(recordcount); if (fields != null) { Document projection = new Document(); for (String fieldName : fields) { projection.put(fieldName, INCLUDE); } findIterable.projection(projection); } cursor = findIterable.iterator(); if (!cursor.hasNext()) { System.err.println("Nothing found in scan for key " + startkey); return Status.ERROR; } result.ensureCapacity(recordcount); while (cursor.hasNext()) { HashMap resultMap = new HashMap(); Document obj = cursor.next(); fillMap(resultMap, obj); result.add(resultMap); } return Status.OK; } catch (Exception e) { System.err.println(e.toString()); return Status.ERROR; } finally { if (cursor != null) { cursor.close(); } } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. 
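 * <p>
 * Editor's sketch (illustrative): update("usertable", "user1", {field0=x})
 * issues updateOne({_id: "user1"}, {$set: {field0: bytes-of-x}}) and reports
 * NOT_FOUND when the write is acknowledged but matched no document.
 * </p>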
 */
  @Override
  public Status update(String table, String key,
-     HashMap values) {
+     Map values) {
    try {
      MongoCollection collection = database.getCollection(table);
      Document query = new Document("_id", key);
      Document fieldsToSet = new Document();
      for (Map.Entry entry : values.entrySet()) {
        fieldsToSet.put(entry.getKey(), entry.getValue().toArray());
      }
      Document update = new Document("$set", fieldsToSet);
      UpdateResult result = collection.updateOne(query, update);
      if (result.wasAcknowledged() && result.getMatchedCount() == 0) {
        System.err.println("Nothing updated for key " + key);
        return Status.NOT_FOUND;
      }
      return Status.OK;
    } catch (Exception e) {
      System.err.println(e.toString());
      return Status.ERROR;
    }
  }

  /**
   * Fills the map with the values from the Document.
   *
   * @param resultMap
   *          The map to fill.
   * @param obj
   *          The object to copy values from.
   */
  protected void fillMap(Map resultMap, Document obj) {
    for (Map.Entry entry : obj.entrySet()) {
      if (entry.getValue() instanceof Binary) {
        resultMap.put(entry.getKey(),
            new ByteArrayByteIterator(((Binary) entry.getValue()).getData()));
      }
    }
  }
}
diff --git a/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java b/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java
index 9a0b095f..90d343eb 100644
--- a/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java
+++ b/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java
@@ -1,339 +1,340 @@
/*
 * Copyright (c) 2014, Yahoo!, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package com.yahoo.ycsb.db;

import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNoException;

import com.yahoo.ycsb.ByteArrayByteIterator;
import com.yahoo.ycsb.ByteIterator;
import com.yahoo.ycsb.DB;
import com.yahoo.ycsb.Status;

import org.junit.BeforeClass;
import org.junit.Test;

import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.util.Collections;
import java.util.HashMap;
+import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Vector;

/**
 * MongoDbClientTest runs the basic DB test cases.
 * <p>
* The tests will be skipped if MongoDB is not running on port 27017 on the * local machine. See the README.md for how to get MongoDB running. *
</p>
*/ @SuppressWarnings("boxing") public abstract class AbstractDBTestCases { /** The default port for MongoDB. */ private static final int MONGODB_DEFAULT_PORT = 27017; /** * Verifies the mongod process (or some process) is running on port 27017, if * not the tests are skipped. */ @BeforeClass public static void setUpBeforeClass() { // Test if we can connect. Socket socket = null; try { // Connect socket = new Socket(InetAddress.getLocalHost(), MONGODB_DEFAULT_PORT); assertThat("Socket is not bound.", socket.getLocalPort(), not(-1)); } catch (IOException connectFailed) { assumeNoException("MongoDB is not running. Skipping tests.", connectFailed); } finally { if (socket != null) { try { socket.close(); } catch (IOException ignore) { // Ignore. } } socket = null; } } /** * Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#delete} . */ @Test public void testInsertReadDelete() { final DB client = getDB(); final String table = getClass().getSimpleName(); final String id = "delete"; HashMap inserted = new HashMap(); inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 })); Status result = client.insert(table, id, inserted); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap read = new HashMap(); Set keys = Collections.singleton("a"); result = client.read(table, id, keys, read); assertThat("Read did not return success (0).", result, is(Status.OK)); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4))); assertFalse(iter.hasNext()); } result = client.delete(table, id); assertThat("Delete did not return success (0).", result, is(Status.OK)); read.clear(); result = client.read(table, id, null, read); assertThat("Read, after delete, did not return not found (1).", result, is(Status.NOT_FOUND)); assertThat("Found the deleted fields.", read.size(), is(0)); result = client.delete(table, id); assertThat("Delete did not return not found (1).", result, is(Status.NOT_FOUND)); } /** * Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#update} . 
*/ @Test public void testInsertReadUpdate() { DB client = getDB(); final String table = getClass().getSimpleName(); final String id = "update"; HashMap inserted = new HashMap(); inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 })); Status result = client.insert(table, id, inserted); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap read = new HashMap(); Set keys = Collections.singleton("a"); result = client.read(table, id, keys, read); assertThat("Read did not return success (0).", result, is(Status.OK)); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4))); assertFalse(iter.hasNext()); } HashMap updated = new HashMap(); updated.put("a", new ByteArrayByteIterator(new byte[] { 5, 6, 7, 8 })); result = client.update(table, id, updated); assertThat("Update did not return success (0).", result, is(Status.OK)); read.clear(); result = client.read(table, id, null, read); assertThat("Read, after update, did not return success (0).", result, is(Status.OK)); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 5))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 6))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 7))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 8))); assertFalse(iter.hasNext()); } } /** * Test method for {@link DB#insert}, {@link DB#read}, and {@link DB#update} . 
*/ @Test public void testInsertReadUpdateWithUpsert() { Properties props = new Properties(); props.setProperty("mongodb.upsert", "true"); DB client = getDB(props); final String table = getClass().getSimpleName(); final String id = "updateWithUpsert"; HashMap inserted = new HashMap(); inserted.put("a", new ByteArrayByteIterator(new byte[] { 1, 2, 3, 4 })); Status result = client.insert(table, id, inserted); assertThat("Insert did not return success (0).", result, is(Status.OK)); HashMap read = new HashMap(); Set keys = Collections.singleton("a"); result = client.read(table, id, keys, read); assertThat("Read did not return success (0).", result, is(Status.OK)); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 1))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 2))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 3))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 4))); assertFalse(iter.hasNext()); } HashMap updated = new HashMap(); updated.put("a", new ByteArrayByteIterator(new byte[] { 5, 6, 7, 8 })); result = client.update(table, id, updated); assertThat("Update did not return success (0).", result, is(Status.OK)); read.clear(); result = client.read(table, id, null, read); assertThat("Read, after update, did not return success (0).", result, is(Status.OK)); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 5))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 6))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 7))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) 8))); assertFalse(iter.hasNext()); } } /** * Test method for {@link DB#scan}. */ @Test public void testScan() { final DB client = getDB(); final String table = getClass().getSimpleName(); // Insert a bunch of documents. for (int i = 0; i < 100; ++i) { HashMap inserted = new HashMap(); inserted.put("a", new ByteArrayByteIterator(new byte[] { (byte) (i & 0xFF), (byte) (i >> 8 & 0xFF), (byte) (i >> 16 & 0xFF), (byte) (i >> 24 & 0xFF) })); Status result = client.insert(table, padded(i), inserted); assertThat("Insert did not return success (0).", result, is(Status.OK)); } Set keys = Collections.singleton("a"); Vector> results = new Vector>(); Status result = client.scan(table, "00050", 5, null, results); assertThat("Read did not return success (0).", result, is(Status.OK)); assertThat(results.size(), is(5)); for (int i = 0; i < 5; ++i) { - HashMap read = results.get(i); + Map read = results.get(i); for (String key : keys) { ByteIterator iter = read.get(key); assertThat("Did not read the inserted field: " + key, iter, notNullValue()); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) ((i + 50) & 0xFF)))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) ((i + 50) >> 8 & 0xFF)))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) ((i + 50) >> 16 & 0xFF)))); assertTrue(iter.hasNext()); assertThat(iter.nextByte(), is(Byte.valueOf((byte) ((i + 50) >> 24 & 0xFF)))); assertFalse(iter.hasNext()); } } } /** * Gets the test DB. 
* * @return The test DB. */ protected DB getDB() { return getDB(new Properties()); } /** * Gets the test DB. * * @param props * Properties to pass to the client. * @return The test DB. */ protected abstract DB getDB(Properties props); /** * Creates a zero padded integer. * * @param i * The integer to padd. * @return The padded integer. */ private String padded(int i) { String result = String.valueOf(i); while (result.length() < 5) { result = "0" + result; } return result; } } \ No newline at end of file diff --git a/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java b/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java index 82820620..e4faad65 100644 --- a/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java +++ b/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java @@ -1,250 +1,247 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.SortedMap; import java.util.Vector; import java.util.concurrent.TimeUnit; import oracle.kv.Consistency; import oracle.kv.Durability; import oracle.kv.FaultException; import oracle.kv.KVStore; import oracle.kv.KVStoreConfig; import oracle.kv.KVStoreFactory; import oracle.kv.Key; import oracle.kv.RequestLimitConfig; import oracle.kv.Value; import oracle.kv.ValueVersion; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; /** * A database interface layer for Oracle NoSQL Database. 
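 * <p>
 * Editor's note (from the code below): scan() is not supported by this
 * binding and always reports an error; reads and writes address individual
 * fields through major/minor key paths (see createKey()).
 * </p>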
 */
public class NoSqlDbClient extends DB {

  private KVStore store;

  private int getPropertyInt(Properties properties, String key,
      int defaultValue) throws DBException {
    String p = properties.getProperty(key);
    int i = defaultValue;
    if (p != null) {
      try {
        i = Integer.parseInt(p);
      } catch (NumberFormatException e) {
        throw new DBException("Illegal number format in " + key + " property");
      }
    }
    return i;
  }

  @Override
  public void init() throws DBException {
    Properties properties = getProperties();

    /* Mandatory properties */
    String storeName = properties.getProperty("storeName", "kvstore");
    String[] helperHosts =
        properties.getProperty("helperHost", "localhost:5000").split(",");

    KVStoreConfig config = new KVStoreConfig(storeName, helperHosts);

    /* Optional properties */
    String p;

    p = properties.getProperty("consistency");
    if (p != null) {
      if (p.equalsIgnoreCase("ABSOLUTE")) {
        config.setConsistency(Consistency.ABSOLUTE);
      } else if (p.equalsIgnoreCase("NONE_REQUIRED")) {
        config.setConsistency(Consistency.NONE_REQUIRED);
      } else {
        throw new DBException("Illegal value in consistency property");
      }
    }

    p = properties.getProperty("durability");
    if (p != null) {
      if (p.equalsIgnoreCase("COMMIT_NO_SYNC")) {
        config.setDurability(Durability.COMMIT_NO_SYNC);
      } else if (p.equalsIgnoreCase("COMMIT_SYNC")) {
        config.setDurability(Durability.COMMIT_SYNC);
      } else if (p.equalsIgnoreCase("COMMIT_WRITE_NO_SYNC")) {
        config.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
      } else {
        throw new DBException("Illegal value in durability property");
      }
    }

    int maxActiveRequests = getPropertyInt(properties,
        "requestLimit.maxActiveRequests",
        RequestLimitConfig.DEFAULT_MAX_ACTIVE_REQUESTS);
    int requestThresholdPercent = getPropertyInt(properties,
        "requestLimit.requestThresholdPercent",
        RequestLimitConfig.DEFAULT_REQUEST_THRESHOLD_PERCENT);
    int nodeLimitPercent = getPropertyInt(properties,
        "requestLimit.nodeLimitPercent",
        RequestLimitConfig.DEFAULT_NODE_LIMIT_PERCENT);
    RequestLimitConfig requestLimitConfig;
    /*
     * The Javadoc says the constructor can throw NodeRequestLimitException,
     * but that exception class is not provided, so it cannot be caught here.
     */
    // try {
    requestLimitConfig = new RequestLimitConfig(maxActiveRequests,
        requestThresholdPercent, nodeLimitPercent);
    // } catch (NodeRequestLimitException e) {
    //   throw new DBException(e);
    // }
    config.setRequestLimit(requestLimitConfig);

    p = properties.getProperty("requestTimeout");
    if (p != null) {
      long timeout = 1;
      try {
        timeout = Long.parseLong(p);
      } catch (NumberFormatException e) {
        throw new DBException(
            "Illegal number format in requestTimeout property");
      }
      try {
        // TODO Support other TimeUnit
        config.setRequestTimeout(timeout, TimeUnit.SECONDS);
      } catch (IllegalArgumentException e) {
        throw new DBException(e);
      }
    }

    try {
      store = KVStoreFactory.getStore(config);
    } catch (FaultException e) {
      throw new DBException(e);
    }
  }

  @Override
  public void cleanup() throws DBException {
    store.close();
  }

  /**
   * Create a key object. We map "table" and (YCSB's) "key" to a major component
   * of the oracle.kv.Key, and "field" to a minor component.
   *
   * @return An oracle.kv.Key object.
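   * <p>
   * Editor's sketch (illustrative, the canonical string form is an
   * assumption): createKey("usertable", "user1", "field0") yields a key of
   * roughly "/usertable/user1/-/field0" (major path /usertable/user1, minor
   * path /field0), so multiGet() on the major path returns all fields of a
   * record.
   * </p>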
*/ private static Key createKey(String table, String key, String field) { List majorPath = new ArrayList(); majorPath.add(table); majorPath.add(key); if (field == null) { return Key.createKey(majorPath); } return Key.createKey(majorPath, field); } private static Key createKey(String table, String key) { return createKey(table, key, null); } private static String getFieldFromKey(Key key) { return key.getMinorPath().get(0); } @Override - public Status read(String table, String key, Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, Map result) { Key kvKey = createKey(table, key); SortedMap kvResult; try { kvResult = store.multiGet(kvKey, null, null); } catch (FaultException e) { System.err.println(e); return Status.ERROR; } for (Map.Entry entry : kvResult.entrySet()) { /* If fields is null, read all fields */ String field = getFieldFromKey(entry.getKey()); if (fields != null && !fields.contains(field)) { continue; } result.put(field, new ByteArrayByteIterator(entry.getValue().getValue().getValue())); } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { System.err.println("Oracle NoSQL Database does not support Scan semantics"); return Status.ERROR; } @Override - public Status update(String table, String key, - HashMap values) { + public Status update(String table, String key, Map values) { for (Map.Entry entry : values.entrySet()) { Key kvKey = createKey(table, key, entry.getKey()); Value kvValue = Value.createValue(entry.getValue().toArray()); try { store.put(kvKey, kvValue); } catch (FaultException e) { System.err.println(e); return Status.ERROR; } } return Status.OK; } @Override - public Status insert(String table, String key, - HashMap values) { + public Status insert(String table, String key, Map values) { return update(table, key, values); } @Override public Status delete(String table, String key) { Key kvKey = createKey(table, key); try { store.multiDelete(kvKey, null, null); } catch (FaultException e) { System.err.println(e); return Status.ERROR; } return Status.OK; } } diff --git a/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java b/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java index 5c54d0c8..caa5f7dc 100644 --- a/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java +++ b/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java @@ -1,320 +1,324 @@ /** * Copyright (c) 2012 - 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import com.orientechnologies.orient.client.remote.OServerAdmin; import com.orientechnologies.orient.core.config.OGlobalConfiguration; import com.orientechnologies.orient.core.db.OPartitionedDatabasePool; import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx; import com.orientechnologies.orient.core.dictionary.ODictionary; import com.orientechnologies.orient.core.exception.OConcurrentModificationException; import com.orientechnologies.orient.core.index.OIndexCursor; import com.orientechnologies.orient.core.record.ORecord; import com.orientechnologies.orient.core.record.impl.ODocument; import com.yahoo.ycsb.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.util.*; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.Vector; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * OrientDB client for YCSB framework. */ public class OrientDBClient extends DB { private static final String URL_PROPERTY = "orientdb.url"; private static final String URL_PROPERTY_DEFAULT = "plocal:." + File.separator + "target" + File.separator + "databases" + File.separator + "ycsb"; private static final String USER_PROPERTY = "orientdb.user"; private static final String USER_PROPERTY_DEFAULT = "admin"; private static final String PASSWORD_PROPERTY = "orientdb.password"; private static final String PASSWORD_PROPERTY_DEFAULT = "admin"; private static final String NEWDB_PROPERTY = "orientdb.newdb"; private static final String NEWDB_PROPERTY_DEFAULT = "false"; private static final String STORAGE_TYPE_PROPERTY = "orientdb.remote.storagetype"; private static final String ORIENTDB_DOCUMENT_TYPE = "document"; private static final String CLASS = "usertable"; private static final Lock INIT_LOCK = new ReentrantLock(); private static boolean dbChecked = false; private static volatile OPartitionedDatabasePool databasePool; private static boolean initialized = false; private static int clientCounter = 0; private boolean isRemote = false; private static final Logger LOG = LoggerFactory.getLogger(OrientDBClient.class); /** * Initialize any state for this DB. Called once per DB instance; there is one DB instance per client thread. 
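 * <p>
 * Editor's note (illustrative): typical settings are
 * orientdb.url=plocal:./target/databases/ycsb for an embedded database, or a
 * remote: URL together with orientdb.remote.storagetype=plocal or memory;
 * orientdb.user and orientdb.password default to admin/admin, and
 * orientdb.newdb=true drops and recreates the database on startup.
 * </p>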
*/ public void init() throws DBException { // initialize OrientDB driver final Properties props = getProperties(); String url = props.getProperty(URL_PROPERTY, URL_PROPERTY_DEFAULT); String user = props.getProperty(USER_PROPERTY, USER_PROPERTY_DEFAULT); String password = props.getProperty(PASSWORD_PROPERTY, PASSWORD_PROPERTY_DEFAULT); Boolean newdb = Boolean.parseBoolean(props.getProperty(NEWDB_PROPERTY, NEWDB_PROPERTY_DEFAULT)); String remoteStorageType = props.getProperty(STORAGE_TYPE_PROPERTY); INIT_LOCK.lock(); try { clientCounter++; if (!initialized) { OGlobalConfiguration.dumpConfiguration(System.out); LOG.info("OrientDB loading database url = " + url); ODatabaseDocumentTx db = new ODatabaseDocumentTx(url); if (db.getStorage().isRemote()) { isRemote = true; } if (!dbChecked) { if (!isRemote) { if (newdb) { if (db.exists()) { db.open(user, password); LOG.info("OrientDB drop and recreate fresh db"); db.drop(); } db.create(); } else { if (!db.exists()) { LOG.info("OrientDB database not found, creating fresh db"); db.create(); } } } else { OServerAdmin server = new OServerAdmin(url).connect(user, password); if (remoteStorageType == null) { throw new DBException( "When connecting to a remote OrientDB instance, " + "specify a database storage type (plocal or memory) with " + STORAGE_TYPE_PROPERTY); } if (newdb) { if (server.existsDatabase()) { LOG.info("OrientDB drop and recreate fresh db"); server.dropDatabase(remoteStorageType); } server.createDatabase(db.getName(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType); } else { if (!server.existsDatabase()) { LOG.info("OrientDB database not found, creating fresh db"); server.createDatabase(server.getURL(), ORIENTDB_DOCUMENT_TYPE, remoteStorageType); } } server.close(); } dbChecked = true; } if (db.isClosed()) { db.open(user, password); } if (!db.getMetadata().getSchema().existsClass(CLASS)) { db.getMetadata().getSchema().createClass(CLASS); } db.close(); if (databasePool == null) { databasePool = new OPartitionedDatabasePool(url, user, password); } initialized = true; } } catch (Exception e) { LOG.error("Could not initialize OrientDB connection pool for Loader: " + e.toString()); e.printStackTrace(); } finally { INIT_LOCK.unlock(); } } OPartitionedDatabasePool getDatabasePool() { return databasePool; } @Override public void cleanup() throws DBException { INIT_LOCK.lock(); try { clientCounter--; if (clientCounter == 0) { databasePool.close(); } databasePool = null; initialized = false; } finally { INIT_LOCK.unlock(); } } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODocument document = new ODocument(CLASS); for (Map.Entry entry : StringByteIterator.getStringMap(values).entrySet()) { document.field(entry.getKey(), entry.getValue()); } document.save(); final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); dictionary.put(key, document); return Status.OK; } catch (Exception e) { e.printStackTrace(); } return Status.ERROR; } @Override public Status delete(String table, String key) { while (true) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); dictionary.remove(key); return Status.OK; } catch (OConcurrentModificationException cme) { continue; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } } @Override - public Status read(String table, String key, Set fields, 
HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); final ODocument document = dictionary.get(key); if (document != null) { if (fields != null) { for (String field : fields) { result.put(field, new StringByteIterator((String) document.field(field))); } } else { for (String field : document.fieldNames()) { result.put(field, new StringByteIterator((String) document.field(field))); } } return Status.OK; } } catch (Exception e) { e.printStackTrace(); } return Status.ERROR; } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { while (true) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); final ODocument document = dictionary.get(key); if (document != null) { for (Map.Entry entry : StringByteIterator.getStringMap(values).entrySet()) { document.field(entry.getKey(), entry.getValue()); } document.save(); return Status.OK; } } catch (OConcurrentModificationException cme) { continue; } catch (Exception e) { e.printStackTrace(); return Status.ERROR; } } } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { if (isRemote) { // Iterator methods needed for scanning are Unsupported for remote database connections. LOG.warn("OrientDB scan operation is not implemented for remote database connections."); return Status.NOT_IMPLEMENTED; } try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); final OIndexCursor entries = dictionary.getIndex().iterateEntriesMajor(startkey, true, true); int currentCount = 0; while (entries.hasNext()) { final ODocument document = entries.next().getRecord(); final HashMap map = new HashMap<>(); result.add(map); if (fields != null) { for (String field : fields) { map.put(field, new StringByteIterator((String) document.field(field))); } } else { for (String field : document.fieldNames()) { map.put(field, new StringByteIterator((String) document.field(field))); } } currentCount++; if (currentCount >= recordcount) { break; } } return Status.OK; } catch (Exception e) { e.printStackTrace(); } return Status.ERROR; } } diff --git a/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java b/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java index 4e296abc..8f599fd4 100644 --- a/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java +++ b/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java @@ -1,178 +1,179 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db; import com.ceph.rados.Rados; import com.ceph.rados.IoCTX; import com.ceph.rados.jna.RadosObjectInfo; import com.ceph.rados.ReadOp; import com.ceph.rados.ReadOp.ReadResult; import com.ceph.rados.exceptions.RadosException; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import java.io.File; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.Vector; import org.json.JSONObject; /** * YCSB binding for RADOS of Ceph. * * See {@code rados/README.md} for details. */ public class RadosClient extends DB { private Rados rados; private IoCTX ioctx; public static final String CONFIG_FILE_PROPERTY = "rados.configfile"; public static final String CONFIG_FILE_DEFAULT = "/etc/ceph/ceph.conf"; public static final String ID_PROPERTY = "rados.id"; public static final String ID_DEFAULT = "admin"; public static final String POOL_PROPERTY = "rados.pool"; public static final String POOL_DEFAULT = "data"; private boolean isInited = false; public void init() throws DBException { Properties props = getProperties(); String configfile = props.getProperty(CONFIG_FILE_PROPERTY); if (configfile == null) { configfile = CONFIG_FILE_DEFAULT; } String id = props.getProperty(ID_PROPERTY); if (id == null) { id = ID_DEFAULT; } String pool = props.getProperty(POOL_PROPERTY); if (pool == null) { pool = POOL_DEFAULT; } // try { // } catch (UnsatisfiedLinkError e) { // throw new DBException("RADOS library is not loaded."); // } rados = new Rados(id); try { rados.confReadFile(new File(configfile)); rados.connect(); ioctx = rados.ioCtxCreate(pool); } catch (RadosException e) { throw new DBException(e.getMessage() + ": " + e.getReturnValue()); } isInited = true; } public void cleanup() throws DBException { if (isInited) { rados.shutDown(); rados.ioCtxDestroy(ioctx); isInited = false; } } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { byte[] buffer; try { RadosObjectInfo info = ioctx.stat(key); buffer = new byte[(int)info.getSize()]; ReadOp rop = ioctx.readOpCreate(); ReadResult readResult = rop.queueRead(0, info.getSize()); // TODO: more size than byte length possible; // rop.operate(key, Rados.OPERATION_NOFLAG); // for rados-java 0.3.0 rop.operate(key, 0); // readResult.raiseExceptionOnError("Error ReadOP(%d)", readResult.getRVal()); // for rados-java 0.3.0 if (readResult.getRVal() < 0) { throw new RadosException("Error ReadOP", readResult.getRVal()); } if (info.getSize() != readResult.getBytesRead()) { return new Status("ERROR", "Error the object size read"); } readResult.getBuffer().get(buffer); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } JSONObject json = new JSONObject(new String(buffer, java.nio.charset.StandardCharsets.UTF_8)); Set fieldsToReturn = (fields == null ? json.keySet() : fields); for (String name : fieldsToReturn) { result.put(name, new StringByteIterator(json.getString(name))); } return result.isEmpty() ? 
Status.ERROR : Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { JSONObject json = new JSONObject(); for (final Entry e : values.entrySet()) { json.put(e.getKey(), e.getValue().toString()); } try { ioctx.write(key, json.toString()); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } return Status.OK; } @Override public Status delete(String table, String key) { try { ioctx.remove(key); } catch (RadosException e) { return new Status("ERROR-" + e.getReturnValue(), e.getMessage()); } return Status.OK; } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Status rtn = delete(table, key); if (rtn.equals(Status.OK)) { return insert(table, key, values); } return rtn; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { return Status.NOT_IMPLEMENTED; } } diff --git a/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java b/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java index fbcfcb0a..a571a2c0 100644 --- a/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java +++ b/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java @@ -1,157 +1,158 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ /** * Redis client binding for YCSB. * * All YCSB records are mapped to a Redis *hash field*. For scanning * operations, all keys are saved (by an arbitrary hash) in a sorted set. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import redis.clients.jedis.Jedis; import redis.clients.jedis.Protocol; import java.util.HashMap; +import java.util.Map; import java.util.Iterator; import java.util.List; import java.util.Properties; import java.util.Set; import java.util.Vector; /** * YCSB binding for Redis. * * See {@code redis/README.md} for details. */ public class RedisClient extends DB { private Jedis jedis; public static final String HOST_PROPERTY = "redis.host"; public static final String PORT_PROPERTY = "redis.port"; public static final String PASSWORD_PROPERTY = "redis.password"; public static final String INDEX_KEY = "_indices"; public void init() throws DBException { Properties props = getProperties(); int port; String portString = props.getProperty(PORT_PROPERTY); if (portString != null) { port = Integer.parseInt(portString); } else { port = Protocol.DEFAULT_PORT; } String host = props.getProperty(HOST_PROPERTY); jedis = new Jedis(host, port); jedis.connect(); String password = props.getProperty(PASSWORD_PROPERTY); if (password != null) { jedis.auth(password); } } public void cleanup() throws DBException { jedis.disconnect(); } /* * Calculate a hash for a key to store it in an index. 
The actual return value * of this function is not interesting -- it primarily needs to be fast and * scattered along the whole space of doubles. In a real world scenario one * would probably use the ASCII values of the keys. */ private double hash(String key) { return key.hashCode(); } // XXX jedis.select(int index) to switch to `table` @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (fields == null) { StringByteIterator.putAllAsByteIterators(result, jedis.hgetAll(key)); } else { String[] fieldArray = (String[]) fields.toArray(new String[fields.size()]); List values = jedis.hmget(key, fieldArray); Iterator fieldIterator = fields.iterator(); Iterator valueIterator = values.iterator(); while (fieldIterator.hasNext() && valueIterator.hasNext()) { result.put(fieldIterator.next(), new StringByteIterator(valueIterator.next())); } assert !fieldIterator.hasNext() && !valueIterator.hasNext(); } return result.isEmpty() ? Status.ERROR : Status.OK; } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { if (jedis.hmset(key, StringByteIterator.getStringMap(values)) .equals("OK")) { jedis.zadd(INDEX_KEY, hash(key), key); return Status.OK; } return Status.ERROR; } @Override public Status delete(String table, String key) { return jedis.del(key) == 0 && jedis.zrem(INDEX_KEY, key) == 0 ? Status.ERROR : Status.OK; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { return jedis.hmset(key, StringByteIterator.getStringMap(values)) .equals("OK") ? Status.OK : Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { Set keys = jedis.zrangeByScore(INDEX_KEY, hash(startkey), Double.POSITIVE_INFINITY, 0, recordcount); HashMap values; for (String key : keys) { values = new HashMap(); read(table, key, fields, values); result.add(values); } return Status.OK; } } diff --git a/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java b/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java index 2fd14673..3daa7fe4 100644 --- a/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java +++ b/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java @@ -1,370 +1,371 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.webservice.rest; import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.util.zip.GZIPInputStream; import javax.ws.rs.HttpMethod; import org.apache.http.HttpEntity; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ContentType; import org.apache.http.entity.InputStreamEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; /** * Class responsible for making web service requests for benchmarking purpose. * Using Apache HttpClient over standard Java HTTP API as this is more flexible * and provides better functionality. For example HttpClient can automatically * handle redirects and proxy authentication which the standard Java API can't. */ public class RestClient extends DB { private static final String URL_PREFIX = "url.prefix"; private static final String CON_TIMEOUT = "timeout.con"; private static final String READ_TIMEOUT = "timeout.read"; private static final String EXEC_TIMEOUT = "timeout.exec"; private static final String LOG_ENABLED = "log.enable"; private static final String HEADERS = "headers"; private static final String COMPRESSED_RESPONSE = "response.compression"; private boolean compressedResponse; private boolean logEnabled; private String urlPrefix; private Properties props; private String[] headers; private CloseableHttpClient client; private int conTimeout = 10000; private int readTimeout = 10000; private int execTimeout = 10000; private volatile Criteria requestTimedout = new Criteria(false); @Override public void init() throws DBException { props = getProperties(); urlPrefix = props.getProperty(URL_PREFIX, "http://127.0.0.1:8080"); conTimeout = Integer.valueOf(props.getProperty(CON_TIMEOUT, "10")) * 1000; readTimeout = Integer.valueOf(props.getProperty(READ_TIMEOUT, "10")) * 1000; execTimeout = Integer.valueOf(props.getProperty(EXEC_TIMEOUT, "10")) * 1000; logEnabled = Boolean.valueOf(props.getProperty(LOG_ENABLED, "false").trim()); compressedResponse = Boolean.valueOf(props.getProperty(COMPRESSED_RESPONSE, "false").trim()); headers = props.getProperty(HEADERS, "Accept */* Content-Type application/xml user-agent Mozilla/5.0 ").trim() .split(" "); setupClient(); } private void setupClient() { RequestConfig.Builder requestBuilder = RequestConfig.custom(); requestBuilder = requestBuilder.setConnectTimeout(conTimeout); requestBuilder = requestBuilder.setConnectionRequestTimeout(readTimeout); requestBuilder = requestBuilder.setSocketTimeout(readTimeout); HttpClientBuilder clientBuilder = HttpClientBuilder.create().setDefaultRequestConfig(requestBuilder.build()); this.client = clientBuilder.setConnectionManagerShared(true).build(); } @Override - 
public Status read(String table, String endpoint, Set<String> fields, HashMap<String, ByteIterator> result) { + public Status read(String table, String endpoint, Set<String> fields, Map<String, ByteIterator> result) { int responseCode; try { responseCode = httpGet(urlPrefix + endpoint, result); } catch (Exception e) { responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.GET); } if (logEnabled) { System.err.println(new StringBuilder("GET Request: ").append(urlPrefix).append(endpoint) .append(" | Response Code: ").append(responseCode).toString()); } return getStatus(responseCode); } @Override - public Status insert(String table, String endpoint, HashMap<String, ByteIterator> values) { + public Status insert(String table, String endpoint, Map<String, ByteIterator> values) { int responseCode; try { responseCode = httpExecute(new HttpPost(urlPrefix + endpoint), values.get("data").toString()); } catch (Exception e) { responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.POST); } if (logEnabled) { System.err.println(new StringBuilder("POST Request: ").append(urlPrefix).append(endpoint) .append(" | Response Code: ").append(responseCode).toString()); } return getStatus(responseCode); } @Override public Status delete(String table, String endpoint) { int responseCode; try { responseCode = httpDelete(urlPrefix + endpoint); } catch (Exception e) { responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.DELETE); } if (logEnabled) { System.err.println(new StringBuilder("DELETE Request: ").append(urlPrefix).append(endpoint) .append(" | Response Code: ").append(responseCode).toString()); } return getStatus(responseCode); } @Override - public Status update(String table, String endpoint, HashMap<String, ByteIterator> values) { + public Status update(String table, String endpoint, Map<String, ByteIterator> values) { int responseCode; try { responseCode = httpExecute(new HttpPut(urlPrefix + endpoint), values.get("data").toString()); } catch (Exception e) { responseCode = handleExceptions(e, urlPrefix + endpoint, HttpMethod.PUT); } if (logEnabled) { System.err.println(new StringBuilder("PUT Request: ").append(urlPrefix).append(endpoint) .append(" | Response Code: ").append(responseCode).toString()); } return getStatus(responseCode); } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { return Status.NOT_IMPLEMENTED; } // Maps HTTP status codes to YCSB status codes. private Status getStatus(int responseCode) { int rc = responseCode / 100; if (responseCode == 400) { return Status.BAD_REQUEST; } else if (responseCode == 403) { return Status.FORBIDDEN; } else if (responseCode == 404) { return Status.NOT_FOUND; } else if (responseCode == 501) { return Status.NOT_IMPLEMENTED; } else if (responseCode == 503) { return Status.SERVICE_UNAVAILABLE; } else if (rc == 5) { return Status.ERROR; } return Status.OK; } private int handleExceptions(Exception e, String url, String method) { if (logEnabled) { System.err.println(new StringBuilder(method).append(" Request: ").append(url).append(" | ") .append(e.getClass().getName()).append(" occurred | Error message: ") .append(e.getMessage()).toString()); } if (e instanceof ClientProtocolException) { return 400; } return 500; } // Connection is automatically released back in case of an exception.
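//
// Editor's sketch (illustrative, not part of the original patch): the three
// http* helpers below share a watchdog idiom. A Timer runnable flips a shared
// Criteria flag after execTimeout milliseconds, and the loop reading the
// response body polls that flag, so a stalled server cannot block the
// benchmark thread indefinitely. The same idea in miniature, using only
// java.util.concurrent types (the names execTimeoutMillis, reader and
// responseContent are placeholders):
//
//   final AtomicBoolean timedOut = new AtomicBoolean(false);
//   Thread watchdog = new Thread(() -> {
//     try { Thread.sleep(execTimeoutMillis); timedOut.set(true); }
//     catch (InterruptedException ignored) { } // interrupt means finished in time
//   });
//   watchdog.start();
//   String line;
//   while ((line = reader.readLine()) != null) {
//     if (timedOut.get()) { throw new TimeoutException(); }
//     responseContent.append(line);
//   }
//   watchdog.interrupt(); // response fully read, cancel the watchdog
//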
- private int httpGet(String endpoint, HashMap result) throws IOException { + private int httpGet(String endpoint, Map result) throws IOException { requestTimedout.setIsSatisfied(false); Thread timer = new Thread(new Timer(execTimeout, requestTimedout)); timer.start(); int responseCode = 200; HttpGet request = new HttpGet(endpoint); for (int i = 0; i < headers.length; i = i + 2) { request.setHeader(headers[i], headers[i + 1]); } CloseableHttpResponse response = client.execute(request); responseCode = response.getStatusLine().getStatusCode(); HttpEntity responseEntity = response.getEntity(); // If null entity don't bother about connection release. if (responseEntity != null) { InputStream stream = responseEntity.getContent(); /* * TODO: Gzip Compression must be supported in the future. Header[] * header = response.getAllHeaders(); * if(response.getHeaders("Content-Encoding")[0].getValue().contains * ("gzip")) stream = new GZIPInputStream(stream); */ BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8")); StringBuffer responseContent = new StringBuffer(); String line = ""; while ((line = reader.readLine()) != null) { if (requestTimedout.isSatisfied()) { // Must avoid memory leak. reader.close(); stream.close(); EntityUtils.consumeQuietly(responseEntity); response.close(); client.close(); throw new TimeoutException(); } responseContent.append(line); } timer.interrupt(); result.put("response", new StringByteIterator(responseContent.toString())); // Closing the input stream will trigger connection release. stream.close(); } EntityUtils.consumeQuietly(responseEntity); response.close(); client.close(); return responseCode; } private int httpExecute(HttpEntityEnclosingRequestBase request, String data) throws IOException { requestTimedout.setIsSatisfied(false); Thread timer = new Thread(new Timer(execTimeout, requestTimedout)); timer.start(); int responseCode = 200; for (int i = 0; i < headers.length; i = i + 2) { request.setHeader(headers[i], headers[i + 1]); } InputStreamEntity reqEntity = new InputStreamEntity(new ByteArrayInputStream(data.getBytes()), ContentType.APPLICATION_FORM_URLENCODED); reqEntity.setChunked(true); request.setEntity(reqEntity); CloseableHttpResponse response = client.execute(request); responseCode = response.getStatusLine().getStatusCode(); HttpEntity responseEntity = response.getEntity(); // If null entity don't bother about connection release. if (responseEntity != null) { InputStream stream = responseEntity.getContent(); if (compressedResponse) { stream = new GZIPInputStream(stream); } BufferedReader reader = new BufferedReader(new InputStreamReader(stream, "UTF-8")); StringBuffer responseContent = new StringBuffer(); String line = ""; while ((line = reader.readLine()) != null) { if (requestTimedout.isSatisfied()) { // Must avoid memory leak. reader.close(); stream.close(); EntityUtils.consumeQuietly(responseEntity); response.close(); client.close(); throw new TimeoutException(); } responseContent.append(line); } timer.interrupt(); // Closing the input stream will trigger connection release. 
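// (With Apache HttpClient, a pooled connection goes back to the connection
// manager only once the entity's content stream has been fully consumed or
// closed; the EntityUtils.consumeQuietly(responseEntity) call below is a
// second, defensive release in case the stream was only partially read.)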
stream.close(); } EntityUtils.consumeQuietly(responseEntity); response.close(); client.close(); return responseCode; } private int httpDelete(String endpoint) throws IOException { requestTimedout.setIsSatisfied(false); Thread timer = new Thread(new Timer(execTimeout, requestTimedout)); timer.start(); int responseCode = 200; HttpDelete request = new HttpDelete(endpoint); for (int i = 0; i < headers.length; i = i + 2) { request.setHeader(headers[i], headers[i + 1]); } CloseableHttpResponse response = client.execute(request); responseCode = response.getStatusLine().getStatusCode(); response.close(); client.close(); return responseCode; } /** * Marks the input {@link Criteria} as satisfied when the input time has elapsed. */ class Timer implements Runnable { private long timeout; private Criteria timedout; public Timer(long timeout, Criteria timedout) { this.timedout = timedout; this.timeout = timeout; } @Override public void run() { try { Thread.sleep(timeout); this.timedout.setIsSatisfied(true); } catch (InterruptedException e) { // Do nothing. } } } /** * Sets the flag when a criteria is fulfilled. */ class Criteria { private boolean isSatisfied; public Criteria(boolean isSatisfied) { this.isSatisfied = isSatisfied; } public boolean isSatisfied() { return isSatisfied; } public void setIsSatisfied(boolean satisfied) { this.isSatisfied = satisfied; } } /** * Private exception class for execution timeout. */ class TimeoutException extends RuntimeException { private static final long serialVersionUID = 1L; public TimeoutException() { super("HTTP Request exceeded execution time limit."); } } } diff --git a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java index 42c3e90e..d08f4dd5 100644 --- a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java +++ b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java @@ -1,594 +1,595 @@ /** * Copyright (c) 2016 YCSB contributors All rights reserved. * Copyright 2014 Basho Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. 
*/ package com.yahoo.ycsb.db.riak; import com.basho.riak.client.api.commands.buckets.StoreBucketProperties; import com.basho.riak.client.api.commands.kv.StoreValue; import com.basho.riak.client.api.commands.kv.UpdateValue; import com.basho.riak.client.core.RiakFuture; import com.basho.riak.client.core.query.RiakObject; import com.basho.riak.client.core.query.indexes.LongIntIndex; import com.basho.riak.client.core.util.BinaryValue; import com.yahoo.ycsb.*; import java.io.IOException; import java.io.InputStream; import java.util.*; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import com.basho.riak.client.api.RiakClient; import com.basho.riak.client.api.cap.Quorum; import com.basho.riak.client.api.commands.indexes.IntIndexQuery; import com.basho.riak.client.api.commands.kv.DeleteValue; import com.basho.riak.client.api.commands.kv.FetchValue; import com.basho.riak.client.core.RiakCluster; import com.basho.riak.client.core.RiakNode; import com.basho.riak.client.core.query.Location; import com.basho.riak.client.core.query.Namespace; import static com.yahoo.ycsb.db.riak.RiakUtils.createResultHashMap; import static com.yahoo.ycsb.db.riak.RiakUtils.getKeyAsLong; import static com.yahoo.ycsb.db.riak.RiakUtils.serializeTable; /** * Riak KV 2.x.y client for YCSB framework. * */ public class RiakKVClient extends DB { private static final String HOST_PROPERTY = "riak.hosts"; private static final String PORT_PROPERTY = "riak.port"; private static final String BUCKET_TYPE_PROPERTY = "riak.bucket_type"; private static final String R_VALUE_PROPERTY = "riak.r_val"; private static final String W_VALUE_PROPERTY = "riak.w_val"; private static final String READ_RETRY_COUNT_PROPERTY = "riak.read_retry_count"; private static final String WAIT_TIME_BEFORE_RETRY_PROPERTY = "riak.wait_time_before_retry"; private static final String TRANSACTION_TIME_LIMIT_PROPERTY = "riak.transaction_time_limit"; private static final String STRONG_CONSISTENCY_PROPERTY = "riak.strong_consistency"; private static final String STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY = "riak.strong_consistent_scans_bucket_type"; private static final String DEBUG_PROPERTY = "riak.debug"; private static final Status TIME_OUT = new Status("TIME_OUT", "Cluster didn't respond after maximum wait time."); private String[] hosts; private int port; private String bucketType; private String bucketType2i; private Quorum rvalue; private Quorum wvalue; private int readRetryCount; private int waitTimeBeforeRetry; private int transactionTimeLimit; private boolean strongConsistency; private String strongConsistentScansBucketType; private boolean performStrongConsistentScans; private boolean debug; private RiakClient riakClient; private RiakCluster riakCluster; private void loadDefaultProperties() { InputStream propFile = RiakKVClient.class.getClassLoader().getResourceAsStream("riak.properties"); Properties propsPF = new Properties(System.getProperties()); try { propsPF.load(propFile); } catch (IOException e) { e.printStackTrace(); } hosts = propsPF.getProperty(HOST_PROPERTY).split(","); port = Integer.parseInt(propsPF.getProperty(PORT_PROPERTY)); bucketType = propsPF.getProperty(BUCKET_TYPE_PROPERTY); rvalue = new Quorum(Integer.parseInt(propsPF.getProperty(R_VALUE_PROPERTY))); wvalue = new Quorum(Integer.parseInt(propsPF.getProperty(W_VALUE_PROPERTY))); readRetryCount = Integer.parseInt(propsPF.getProperty(READ_RETRY_COUNT_PROPERTY)); waitTimeBeforeRetry = Integer.parseInt(propsPF.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY)); 
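// (Editor's note: the getProperty/parseInt reads in this method assume the
// bundled riak.properties resource defines every key; a missing entry would
// surface as a NumberFormatException from Integer.parseInt(null) and abort
// init(). Per-run overrides are applied afterwards by loadProperties(), e.g.
// `bin/ycsb run riak -p riak.hosts=10.0.0.1,10.0.0.2`, where the host values
// here are of course made up.)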
transactionTimeLimit = Integer.parseInt(propsPF.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY)); strongConsistency = Boolean.parseBoolean(propsPF.getProperty(STRONG_CONSISTENCY_PROPERTY)); strongConsistentScansBucketType = propsPF.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY); debug = Boolean.parseBoolean(propsPF.getProperty(DEBUG_PROPERTY)); } private void loadProperties() { // First, load the default properties... loadDefaultProperties(); // ...then, check for some props set at command line! Properties props = getProperties(); String portString = props.getProperty(PORT_PROPERTY); if (portString != null) { port = Integer.parseInt(portString); } String hostsString = props.getProperty(HOST_PROPERTY); if (hostsString != null) { hosts = hostsString.split(","); } String bucketTypeString = props.getProperty(BUCKET_TYPE_PROPERTY); if (bucketTypeString != null) { bucketType = bucketTypeString; } String rValueString = props.getProperty(R_VALUE_PROPERTY); if (rValueString != null) { rvalue = new Quorum(Integer.parseInt(rValueString)); } String wValueString = props.getProperty(W_VALUE_PROPERTY); if (wValueString != null) { wvalue = new Quorum(Integer.parseInt(wValueString)); } String readRetryCountString = props.getProperty(READ_RETRY_COUNT_PROPERTY); if (readRetryCountString != null) { readRetryCount = Integer.parseInt(readRetryCountString); } String waitTimeBeforeRetryString = props.getProperty(WAIT_TIME_BEFORE_RETRY_PROPERTY); if (waitTimeBeforeRetryString != null) { waitTimeBeforeRetry = Integer.parseInt(waitTimeBeforeRetryString); } String transactionTimeLimitString = props.getProperty(TRANSACTION_TIME_LIMIT_PROPERTY); if (transactionTimeLimitString != null) { transactionTimeLimit = Integer.parseInt(transactionTimeLimitString); } String strongConsistencyString = props.getProperty(STRONG_CONSISTENCY_PROPERTY); if (strongConsistencyString != null) { strongConsistency = Boolean.parseBoolean(strongConsistencyString); } String strongConsistentScansBucketTypeString = props.getProperty(STRONG_CONSISTENT_SCANS_BUCKET_TYPE_PROPERTY); if (strongConsistentScansBucketTypeString != null) { strongConsistentScansBucketType = strongConsistentScansBucketTypeString; } String debugString = props.getProperty(DEBUG_PROPERTY); if (debugString != null) { debug = Boolean.parseBoolean(debugString); } } public void init() throws DBException { loadProperties(); RiakNode.Builder builder = new RiakNode.Builder().withRemotePort(port); List nodes = RiakNode.Builder.buildNodes(builder, Arrays.asList(hosts)); riakCluster = new RiakCluster.Builder(nodes).build(); try { riakCluster.start(); riakClient = new RiakClient(riakCluster); } catch (Exception e) { System.err.println("Unable to properly start up the cluster. Reason: " + e.toString()); throw new DBException(e); } // If strong consistency is in use, we need to change the bucket-type where the 2i indexes will be stored. if (strongConsistency && !strongConsistentScansBucketType.isEmpty()) { // The 2i indexes have to be stored in the appositely created strongConsistentScansBucketType: this however has // to be done only if the user actually created it! So, if the latter doesn't exist, then the scan transactions // will not be performed at all. bucketType2i = strongConsistentScansBucketType; performStrongConsistentScans = true; } else { // If instead eventual consistency is in use, then the 2i indexes have to be stored in the bucket-type // indicated with the bucketType variable. 
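// In short: bucketType2i ends up as strongConsistentScansBucketType when
// strong consistency is enabled and that bucket-type was supplied, and as
// the plain bucketType in the eventually consistent case handled below.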
bucketType2i = bucketType; performStrongConsistentScans = false; } if (debug) { System.err.println("DEBUG ENABLED. Configuration parameters:"); System.err.println("-----------------------------------------"); System.err.println("Hosts: " + Arrays.toString(hosts)); System.err.println("Port: " + port); System.err.println("Bucket Type: " + bucketType); System.err.println("R Val: " + rvalue.toString()); System.err.println("W Val: " + wvalue.toString()); System.err.println("Read Retry Count: " + readRetryCount); System.err.println("Wait Time Before Retry: " + waitTimeBeforeRetry + " ms"); System.err.println("Transaction Time Limit: " + transactionTimeLimit + " s"); System.err.println("Consistency model: " + (strongConsistency ? "Strong" : "Eventual")); if (strongConsistency) { System.err.println("Strong Consistent Scan Transactions " + (performStrongConsistentScans ? "" : "NOT ") + "allowed."); } } } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table (Riak bucket) * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { Location location = new Location(new Namespace(bucketType, table), key); FetchValue fv = new FetchValue.Builder(location).withOption(FetchValue.Option.R, rvalue).build(); FetchValue.Response response; try { response = fetch(fv); if (response.isNotFound()) { if (debug) { System.err.println("Unable to read key " + key + ". Reason: NOT FOUND"); } return Status.NOT_FOUND; } } catch (TimeoutException e) { if (debug) { System.err.println("Unable to read key " + key + ". Reason: TIME OUT"); } return TIME_OUT; } catch (Exception e) { if (debug) { System.err.println("Unable to read key " + key + ". Reason: " + e.toString()); } return Status.ERROR; } // Create the result HashMap. - createResultHashMap(fields, response, result); - + HashMap partialResult = new HashMap<>(); + createResultHashMap(fields, response, partialResult); + result.putAll(partialResult); return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored in * a HashMap. * Note: The scan operation requires the use of secondary indexes (2i) and LevelDB. * * @param table The name of the table (Riak bucket) * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { if (strongConsistency && !performStrongConsistentScans) { return Status.NOT_IMPLEMENTED; } // The strong consistent bucket-type is not capable of storing 2i indexes. So, we need to read them from the fake // one (which we use only to store indexes). This is why, when using such a consistency model, the bucketType2i // variable is set to FAKE_BUCKET_TYPE. 
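// Editor's note: the FAKE_BUCKET_TYPE named above appears to survive from an
// earlier revision; the field actually used is bucketType2i, set in init().
// The query built below is a plain 2i range scan: it asks Riak for up to
// `recordcount` entries whose integer index value lies between
// getKeyAsLong(startkey) and Long.MAX_VALUE, with pagination sorting enabled
// so the order is stable, and then pays one extra FetchValue round-trip per
// entry to load the object itself. The index name "key" relies on the Riak
// Java client appending the "_int" type suffix, which lines it up with the
// LongIntIndex.named("key_int") index populated at insert time.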
IntIndexQuery iiq = new IntIndexQuery .Builder(new Namespace(bucketType2i, table), "key", getKeyAsLong(startkey), Long.MAX_VALUE) .withMaxResults(recordcount) .withPaginationSort(true) .build(); Location location; RiakFuture<IntIndexQuery.Response, IntIndexQuery> future = riakClient.executeAsync(iiq); try { IntIndexQuery.Response response = future.get(transactionTimeLimit, TimeUnit.SECONDS); List<IntIndexQuery.Response.Entry> entries = response.getEntries(); // If no entries were retrieved, then something bad happened... if (entries.size() == 0) { if (debug) { System.err.println("Unable to scan any record starting from key " + startkey + ", aborting transaction. " + "Reason: NOT FOUND"); } return Status.NOT_FOUND; } for (IntIndexQuery.Response.Entry entry : entries) { // If strong consistency is in use, then the actual location of the object we want to read is obtained by // fetching the key from the one retrieved with the 2i indexes search operation. if (strongConsistency) { location = new Location(new Namespace(bucketType, table), entry.getRiakObjectLocation().getKeyAsString()); } else { location = entry.getRiakObjectLocation(); } FetchValue fv = new FetchValue.Builder(location) .withOption(FetchValue.Option.R, rvalue) .build(); FetchValue.Response keyResponse = fetch(fv); if (keyResponse.isNotFound()) { if (debug) { System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " + "transaction. Reason: NOT FOUND"); } return Status.NOT_FOUND; } // Create the partial result to add to the result vector. HashMap<String, ByteIterator> partialResult = new HashMap<>(); createResultHashMap(fields, keyResponse, partialResult); result.add(partialResult); } } catch (TimeoutException e) { if (debug) { System.err.println("Unable to scan all requested records starting from key " + startkey + ", aborting " + "transaction. Reason: TIME OUT"); } return TIME_OUT; } catch (Exception e) { if (debug) { System.err.println("Unable to scan all records starting from key " + startkey + ", aborting transaction. " + "Reason: " + e.toString()); } return Status.ERROR; } return Status.OK; } /** * Tries to perform a read and, whenever it fails, retries it. It tries as many times as indicated, * even if the function riakClient.execute(fv) throws an exception. This is needed for those situations in which the * cluster is unable to respond properly due to overload. Note however that if the cluster doesn't respond after * transactionTimeLimit, the transaction is discarded immediately. * * @param fv The value to fetch from the cluster. */ private FetchValue.Response fetch(FetchValue fv) throws TimeoutException { FetchValue.Response response = null; for (int i = 0; i < readRetryCount; i++) { RiakFuture<FetchValue.Response, Location> future = riakClient.executeAsync(fv); try { response = future.get(transactionTimeLimit, TimeUnit.SECONDS); if (!response.isNotFound()) { break; } } catch (TimeoutException e) { // Let the caller decide how to handle this exception... throw new TimeoutException(); } catch (Exception e) { // Sleep for a few ms before retrying... try { Thread.sleep(waitTimeBeforeRetry); } catch (InterruptedException e1) { e1.printStackTrace(); } } } return response; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. Also creates a secondary index (2i) for each record consisting of the key * converted to long to be used for the scan operation. * * @param table The name of the table (Riak bucket) * @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override - public Status insert(String table, String key, HashMap<String, ByteIterator> values) { + public Status insert(String table, String key, Map<String, ByteIterator> values) { Location location = new Location(new Namespace(bucketType, table), key); RiakObject object = new RiakObject(); // Strong consistency doesn't support secondary indexing, but the eventually consistent model does. So, we can mock // 2i usage by creating a fake object stored in an eventually consistent bucket-type with the SAME KEY THAT THE // ACTUAL OBJECT HAS. The latter is obviously stored in the strongly consistent bucket-type indicated with the // riak.bucket_type property. if (strongConsistency && performStrongConsistentScans) { // Create a fake object to store in the default bucket-type just to keep track of the 2i indices. Location fakeLocation = new Location(new Namespace(strongConsistentScansBucketType, table), key); // Obviously, we want the fake object to contain as little data as possible. We can't create an empty object, so // we have to choose the minimum data size allowed: one byte. RiakObject fakeObject = new RiakObject(); fakeObject.setValue(BinaryValue.create(new byte[]{0x00})); fakeObject.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key)); StoreValue fakeStore = new StoreValue.Builder(fakeObject) .withLocation(fakeLocation) .build(); // We don't mind whether the operation has finished or not, because waiting for it to complete would slow down the // client and make our solution too heavy to be seen as a valid compromise. This obviously means that under // heavy load conditions a scan operation could fail due to an unfinished "fakeStore". riakClient.executeAsync(fakeStore); } else if (!strongConsistency) { // The next operation is useless when using the strong consistency model, so it's ok to perform it only when using // eventual consistency. object.getIndexes().getIndex(LongIntIndex.named("key_int")).add(getKeyAsLong(key)); } // Store proper values into the object. object.setValue(BinaryValue.create(serializeTable(values))); StoreValue store = new StoreValue.Builder(object) .withOption(StoreValue.Option.W, wvalue) .withLocation(location) .build(); RiakFuture<StoreValue.Response, Location> future = riakClient.executeAsync(store); try { future.get(transactionTimeLimit, TimeUnit.SECONDS); } catch (TimeoutException e) { if (debug) { System.err.println("Unable to insert key " + key + ". Reason: TIME OUT"); } return TIME_OUT; } catch (Exception e) { if (debug) { System.err.println("Unable to insert key " + key + ". Reason: " + e.toString()); } return Status.ERROR; } return Status.OK; } /** * Auxiliary class needed for object substitution within the update operation. It is a fundamental part of the * fetch-update (locally)-store cycle described by Basho to properly perform a strongly consistent update. */ private static final class UpdateEntity extends UpdateValue.Update<RiakObject> { private final RiakObject object; private UpdateEntity(RiakObject object) { this.object = object; } // Simply returns the object. @Override public RiakObject apply(RiakObject original) { return object; } } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table (Riak bucket) * @param key The record key of the record to write.
* @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override - public Status update(String table, String key, HashMap<String, ByteIterator> values) { + public Status update(String table, String key, Map<String, ByteIterator> values) { // If the eventual consistency model is in use, then an update operation is practically equivalent to an insert. if (!strongConsistency) { return insert(table, key, values); } Location location = new Location(new Namespace(bucketType, table), key); UpdateValue update = new UpdateValue.Builder(location) .withUpdate(new UpdateEntity(new RiakObject().setValue(BinaryValue.create(serializeTable(values))))) .build(); RiakFuture<UpdateValue.Response, Location> future = riakClient.executeAsync(update); try { // For some reason, the update transaction doesn't throw any exception when no cluster has been started, so one // needs to check whether it was done or not. When calling the wasUpdated() function with no nodes available, a // NullPointerException is thrown. // Moreover, such an exception could be thrown when multiple threads are trying to update the same key or, more // generally, when the system is being queried by many clients (i.e. overloaded). This is a known limitation of // Riak KV's strong consistency implementation. future.get(transactionTimeLimit, TimeUnit.SECONDS).wasUpdated(); } catch (TimeoutException e) { if (debug) { System.err.println("Unable to update key " + key + ". Reason: TIME OUT"); } return TIME_OUT; } catch (Exception e) { if (debug) { System.err.println("Unable to update key " + key + ". Reason: " + e.toString()); } return Status.ERROR; } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table (Riak bucket) * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { Location location = new Location(new Namespace(bucketType, table), key); DeleteValue dv = new DeleteValue.Builder(location).build(); RiakFuture<Void, Location> future = riakClient.executeAsync(dv); try { future.get(transactionTimeLimit, TimeUnit.SECONDS); } catch (TimeoutException e) { if (debug) { System.err.println("Unable to delete key " + key + ". Reason: TIME OUT"); } return TIME_OUT; } catch (Exception e) { if (debug) { System.err.println("Unable to delete key " + key + ". Reason: " + e.toString()); } return Status.ERROR; } return Status.OK; } public void cleanup() throws DBException { try { riakCluster.shutdown(); } catch (Exception e) { System.err.println("Unable to properly shut down the cluster. Reason: " + e.toString()); throw new DBException(e); } } /** * Auxiliary function needed for testing. It configures the default bucket-type to take care of the consistency * problem by disallowing sibling creation. Moreover, it disables strong consistency, because we have no * way to create a proper bucket-type with which to fake 2i index usage. * * @param bucket The bucket name. * @throws Exception Thrown if something bad happens.
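* <p>Editor's note: withAllowMulti(false) below tells Riak not to surface
* concurrent writes as siblings, so the tests can run against the plain
* "default" bucket-type without any sibling-resolution logic.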
*/ void setTestEnvironment(String bucket) throws Exception { bucketType = "default"; bucketType2i = bucketType; strongConsistency = false; Namespace ns = new Namespace(bucketType, bucket); StoreBucketProperties newBucketProperties = new StoreBucketProperties.Builder(ns).withAllowMulti(false).build(); riakClient.execute(newBucketProperties); } } diff --git a/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java b/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java index 8ef3f5be..71b7f7cd 100644 --- a/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java +++ b/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java @@ -1,527 +1,527 @@ /** * Copyright (c) 2015 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. * * S3 storage client binding for YCSB. */ package com.yahoo.ycsb.db; import java.util.HashMap; import java.util.Properties; import java.util.Set; import java.util.Vector; import java.io.ByteArrayInputStream; import java.io.InputStream; import java.util.*; import java.util.concurrent.atomic.AtomicInteger; import java.net.*; import com.yahoo.ycsb.ByteArrayByteIterator; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.*; import com.amazonaws.auth.*; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PutObjectResult; import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.ClientConfiguration; import com.amazonaws.regions.Region; import com.amazonaws.regions.Regions; import com.amazonaws.Protocol; import com.amazonaws.services.s3.model.DeleteObjectRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.SSECustomerKey; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.GetObjectMetadataRequest; /** * S3 Storage client for YCSB framework. * * Properties to set: * * s3.accessKeyId=access key S3 aws * s3.secretKey=secret key S3 aws * s3.endPoint=s3.amazonaws.com * s3.region=us-east-1 * The parameter table is the name of the bucket to upload the files to. * It must be created before starting the benchmark. * The size of the file to upload is determined by two parameters: * - fieldcount this is the number of fields of a record in YCSB * - fieldlength this is the size in bytes of a single field in the record * Together these two parameters define the size of the file to upload; * the size in bytes is given by the fieldlength multiplied by the fieldcount. * The name of the file is determined by the parameter key. * This key is automatically generated by YCSB.
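* Worked example (editor's illustration): with the YCSB defaults
* fieldcount=10 and fieldlength=100, every record becomes a single
* 10 x 100 = 1000-byte object, stored under the YCSB-generated record
* key in the bucket passed as the table name.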
* */ public class S3Client extends DB { private static AmazonS3Client s3Client; private static String sse; private static SSECustomerKey ssecKey; private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** * Cleanup any state for this storage. * Called once per S3 instance; */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { try { s3Client.shutdown(); System.out.println("The client is shutdown successfully"); } catch (Exception e){ System.err.println("Could not shutdown the S3Client: "+e.toString()); e.printStackTrace(); } finally { if (s3Client != null){ s3Client = null; } } } } /** * Delete a file from S3 Storage. * * @param bucket * The name of the bucket * @param key * The record key of the file to delete. * @return OK on success, otherwise ERROR. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String bucket, String key) { try { s3Client.deleteObject(new DeleteObjectRequest(bucket, key)); } catch (Exception e){ System.err.println("Not possible to delete the key "+key); e.printStackTrace(); return Status.ERROR; } return Status.OK; } /** * Initialize any state for the storage. * Called once per S3 instance; If the client is not null it is re-used. */ @Override public void init() throws DBException { final int count = INIT_COUNT.incrementAndGet(); synchronized (S3Client.class){ Properties propsCL = getProperties(); int recordcount = Integer.parseInt( propsCL.getProperty("recordcount")); int operationcount = Integer.parseInt( propsCL.getProperty("operationcount")); int numberOfOperations = 0; if (recordcount > 0){ if (recordcount > operationcount){ numberOfOperations = recordcount; } else { numberOfOperations = operationcount; } } else { numberOfOperations = operationcount; } if (count <= numberOfOperations) { String accessKeyId = null; String secretKey = null; String endPoint = null; String region = null; String maxErrorRetry = null; String maxConnections = null; String protocol = null; BasicAWSCredentials s3Credentials; ClientConfiguration clientConfig; if (s3Client != null) { System.out.println("Reusing the same client"); return; } try { InputStream propFile = S3Client.class.getClassLoader() .getResourceAsStream("s3.properties"); Properties props = new Properties(System.getProperties()); props.load(propFile); accessKeyId = props.getProperty("s3.accessKeyId"); if (accessKeyId == null){ accessKeyId = propsCL.getProperty("s3.accessKeyId"); } System.out.println(accessKeyId); secretKey = props.getProperty("s3.secretKey"); if (secretKey == null){ secretKey = propsCL.getProperty("s3.secretKey"); } System.out.println(secretKey); endPoint = props.getProperty("s3.endPoint"); if (endPoint == null){ endPoint = propsCL.getProperty("s3.endPoint", "s3.amazonaws.com"); } System.out.println(endPoint); region = props.getProperty("s3.region"); if (region == null){ region = propsCL.getProperty("s3.region", "us-east-1"); } System.out.println(region); maxErrorRetry = props.getProperty("s3.maxErrorRetry"); if (maxErrorRetry == null){ maxErrorRetry = propsCL.getProperty("s3.maxErrorRetry", "15"); } maxConnections = props.getProperty("s3.maxConnections"); if (maxConnections == null){ maxConnections = propsCL.getProperty("s3.maxConnections"); } protocol = props.getProperty("s3.protocol"); if (protocol == null){ protocol = propsCL.getProperty("s3.protocol", "HTTPS"); } sse = props.getProperty("s3.sse"); if (sse == null){ sse = propsCL.getProperty("s3.sse", "false"); } String ssec = 
props.getProperty("s3.ssec"); if (ssec == null){ ssec = propsCL.getProperty("s3.ssec", null); } else { ssecKey = new SSECustomerKey(ssec); } } catch (Exception e){ System.err.println("The file properties doesn't exist "+e.toString()); e.printStackTrace(); } try { System.out.println("Inizializing the S3 connection"); s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey); clientConfig = new ClientConfiguration(); clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry)); if(protocol.equals("HTTP")) { clientConfig.setProtocol(Protocol.HTTP); } else { clientConfig.setProtocol(Protocol.HTTPS); } if(maxConnections != null) { clientConfig.setMaxConnections(Integer.parseInt(maxConnections)); } s3Client = new AmazonS3Client(s3Credentials, clientConfig); s3Client.setRegion(Region.getRegion(Regions.fromName(region))); s3Client.setEndpoint(endPoint); System.out.println("Connection successfully initialized"); } catch (Exception e){ System.err.println("Could not connect to S3 storage: "+ e.toString()); e.printStackTrace(); throw new DBException(e); } } else { System.err.println( "The number of threads must be less or equal than the operations"); throw new DBException(new Error( "The number of threads must be less or equal than the operations")); } } } /** * Create a new File in the Bucket. Any field/value pairs in the specified * values HashMap will be written into the file with the specified record * key. * * @param bucket * The name of the bucket * @param key * The record key of the file to insert. * @param values * A HashMap of field/value pairs to insert in the file. * Only the content of the first field is written to a byteArray * multiplied by the number of field. In this way the size * of the file to upload is determined by the fieldlength * and fieldcount parameters. * @return OK on success, ERROR otherwise. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String bucket, String key, - HashMap values) { + Map values) { return writeToStorage(bucket, key, values, true, sse, ssecKey); } /** * Read a file from the Bucket. Each field/value pair from the result * will be stored in a HashMap. * * @param bucket * The name of the bucket * @param key * The record key of the file to read. * @param fields * The list of fields to read, or null for all of them, * it is null by default * @param result * A HashMap of field/value pairs for the result * @return OK on success, ERROR otherwise. */ @Override public Status read(String bucket, String key, Set fields, - HashMap result) { + Map result) { return readFromStorage(bucket, key, result, ssecKey); } /** * Update a file in the database. Any field/value pairs in the specified * values HashMap will be written into the file with the specified file * key, overwriting any existing values with the same field name. * * @param bucket * The name of the bucket * @param key * The file key of the file to write. * @param values * A HashMap of field/value pairs to update in the record * @return OK on success, ERORR otherwise. */ @Override public Status update(String bucket, String key, - HashMap values) { + Map values) { return writeToStorage(bucket, key, values, false, sse, ssecKey); } /** * Perform a range scan for a set of files in the bucket. Each * field/value pair from the result will be stored in a HashMap. * * @param bucket * The name of the bucket * @param startkey * The file key of the first file to read. 
* @param recordcount * The number of files to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one file * @return OK on success, ERROR otherwise. */ @Override public Status scan(String bucket, String startkey, int recordcount, Set fields, Vector> result) { return scanFromStorage(bucket, startkey, recordcount, result, ssecKey); } /** * Upload a new object to S3 or update an object on S3. * * @param bucket * The name of the bucket * @param key * The file key of the object to upload/update. * @param values * The data to be written on the object * @param updateMarker * A boolean value. If true a new object will be uploaded * to S3. If false an existing object will be re-uploaded * */ protected Status writeToStorage(String bucket, String key, - HashMap values, Boolean updateMarker, - String sseLocal, SSECustomerKey ssecLocal) { + Map values, Boolean updateMarker, + String sseLocal, SSECustomerKey ssecLocal) { int totalSize = 0; int fieldCount = values.size(); //number of fields to concatenate // getting the first field in the values Object keyToSearch = values.keySet().toArray()[0]; // getting the content of just one field byte[] sourceArray = values.get(keyToSearch).toArray(); int sizeArray = sourceArray.length; //size of each array if (updateMarker){ totalSize = sizeArray*fieldCount; } else { try { Map.Entry objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal); int sizeOfFile = (int)objectAndMetadata.getValue().getContentLength(); fieldCount = sizeOfFile/sizeArray; totalSize = sizeOfFile; objectAndMetadata.getKey().close(); } catch (Exception e){ System.err.println("Not possible to get the object :"+key); e.printStackTrace(); return Status.ERROR; } } byte[] destinationArray = new byte[totalSize]; int offset = 0; for (int i = 0; i < fieldCount; i++) { System.arraycopy(sourceArray, 0, destinationArray, offset, sizeArray); offset += sizeArray; } try (InputStream input = new ByteArrayInputStream(destinationArray)) { ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(totalSize); PutObjectRequest putObjectRequest = null; if (sseLocal.equals("true")) { metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); putObjectRequest = new PutObjectRequest(bucket, key, input, metadata); } else if (ssecLocal != null) { putObjectRequest = new PutObjectRequest(bucket, key, input, metadata).withSSECustomerKey(ssecLocal); } else { putObjectRequest = new PutObjectRequest(bucket, key, input, metadata); } try { PutObjectResult res = s3Client.putObject(putObjectRequest); if(res.getETag() == null) { return Status.ERROR; } else { if (sseLocal.equals("true")) { System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm()); } else if (ssecLocal != null) { System.out.println("Uploaded object encryption status is " + res.getSSEAlgorithm()); } } } catch (Exception e) { System.err.println("Not possible to write object :"+key); e.printStackTrace(); return Status.ERROR; } } catch (Exception e) { System.err.println("Error in the creation of the stream :"+e.toString()); e.printStackTrace(); return Status.ERROR; } return Status.OK; } /** * Download an object from S3. * * @param bucket * The name of the bucket * @param key * The file key of the object to upload/update. 
/** * Download an object from S3. * * @param bucket * The name of the bucket * @param key * The file key of the object to download. * @param result * The HashMap where data from the object is written * @return OK on success, ERROR otherwise. */ protected Status readFromStorage(String bucket, String key, - HashMap result, SSECustomerKey ssecLocal) { + Map result, SSECustomerKey ssecLocal) { try { Map.Entry objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal); InputStream objectData = objectAndMetadata.getKey().getObjectContent(); //consuming the stream // writing the stream to bytes and to results int sizeOfFile = (int)objectAndMetadata.getValue().getContentLength(); byte[] inputStreamToByte = new byte[sizeOfFile]; objectData.read(inputStreamToByte, 0, sizeOfFile); result.put(key, new ByteArrayByteIterator(inputStreamToByte)); objectData.close(); objectAndMetadata.getKey().close(); } catch (Exception e){ System.err.println("Unable to get the object "+key); e.printStackTrace(); return Status.ERROR; } return Status.OK; } private Map.Entry getS3ObjectAndMetadata(String bucket, String key, SSECustomerKey ssecLocal) { GetObjectRequest getObjectRequest; GetObjectMetadataRequest getObjectMetadataRequest; if (ssecLocal != null) { getObjectRequest = new GetObjectRequest(bucket, key).withSSECustomerKey(ssecLocal); getObjectMetadataRequest = new GetObjectMetadataRequest(bucket, key).withSSECustomerKey(ssecLocal); } else { getObjectRequest = new GetObjectRequest(bucket, key); getObjectMetadataRequest = new GetObjectMetadataRequest(bucket, key); } return new AbstractMap.SimpleEntry<>(s3Client.getObject(getObjectRequest), s3Client.getObjectMetadata(getObjectMetadataRequest)); } /** * Perform an emulation of a database scan operation on an S3 bucket. * * @param bucket * The name of the bucket * @param startkey * The file key of the first file to read. * @param recordcount * The number of files to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set of field/value * pairs for one file * @return OK on success, ERROR otherwise. */ protected Status scanFromStorage(String bucket, String startkey, int recordcount, Vector> result, SSECustomerKey ssecLocal) { int counter = 0; ObjectListing listing = s3Client.listObjects(bucket); List summaries = listing.getObjectSummaries(); List keyList = new ArrayList(); int startkeyNumber = 0; int numberOfIteration = 0; // getting the list of files in the bucket while (listing.isTruncated()) { listing = s3Client.listNextBatchOfObjects(listing); summaries.addAll(listing.getObjectSummaries()); } for (S3ObjectSummary summary : summaries) { String summaryKey = summary.getKey(); keyList.add(summaryKey); } // sort the list of files alphabetically Collections.sort(keyList); // find the position of the starting file for the scan for (String key : keyList) { if (key.equals(startkey)){ startkeyNumber = counter; } else { counter = counter + 1; } } // if fewer files exist than the requested recordcount, read them all if (recordcount < keyList.size()) { numberOfIteration = recordcount; } else { numberOfIteration = keyList.size(); } // read the files starting from the startkey file until the end of the // files or until recordcount is reached for (int i = startkeyNumber; i < numberOfIteration; i++){ HashMap resultTemp = new HashMap(); readFromStorage(bucket, keyList.get(i), resultTemp, ssecLocal); result.add(resultTemp); } return Status.OK; } } diff --git a/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java b/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java index 4232221b..9b131363 100644 ---
a/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java +++ b/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java @@ -1,332 +1,332 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.solr; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.impl.HttpSolrClient; import org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.client.solrj.response.UpdateResponse; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrInputDocument; import java.io.IOException; import java.util.*; import java.util.Map.Entry; /** * Solr client for YCSB framework. * *
* Default properties to set: * See README.md
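* A minimal sketch of a workload properties file, using the property names
* read in init() below (the values shown are this class's own defaults):
* <pre>
* solr.cloud=false
* solr.base.url=http://localhost:8983/solr
* solr.zookeeper.hosts=localhost:2181
* solr.batch.mode=false
* solr.commit.within.time=1000
* </pre>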
* */ public class SolrClient extends DB { public static final String DEFAULT_CLOUD_MODE = "false"; public static final String DEFAULT_BATCH_MODE = "false"; public static final String DEFAULT_ZOOKEEPER_HOSTS = "localhost:2181"; public static final String DEFAULT_SOLR_BASE_URL = "http://localhost:8983/solr"; public static final String DEFAULT_COMMIT_WITHIN_TIME = "1000"; private org.apache.solr.client.solrj.SolrClient client; private Integer commitTime; private Boolean batchMode; /** * Initialize any state for this DB. Called once per DB instance; there is one DB instance per * client thread. */ @Override public void init() throws DBException { Properties props = getProperties(); commitTime = Integer .parseInt(props.getProperty("solr.commit.within.time", DEFAULT_COMMIT_WITHIN_TIME)); batchMode = Boolean.parseBoolean(props.getProperty("solr.batch.mode", DEFAULT_BATCH_MODE)); String jaasConfPath = props.getProperty("solr.jaas.conf.path"); if(jaasConfPath != null) { System.setProperty("java.security.auth.login.config", jaasConfPath); HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer()); } // Check if Solr cluster is running in SolrCloud or Stand-alone mode Boolean cloudMode = Boolean.parseBoolean(props.getProperty("solr.cloud", DEFAULT_CLOUD_MODE)); System.err.println("Solr Cloud Mode = " + cloudMode); if (cloudMode) { System.err.println("Solr Zookeeper Remote Hosts = " + props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS)); client = new CloudSolrClient( props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS)); } else { client = new HttpSolrClient(props.getProperty("solr.base.url", DEFAULT_SOLR_BASE_URL)); } } @Override public void cleanup() throws DBException { try { client.close(); } catch (IOException e) { throw new DBException(e); } } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", key); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { doc.addField(entry.getKey(), entry.getValue()); } UpdateResponse response; if (batchMode) { response = client.add(table, doc, commitTime); } else { response = client.add(table, doc); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. 
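* A sketch of the two commit strategies this binding applies to every write
* and delete (taken from the method bodies in this class):
* <pre>
* if (batchMode) {
*   client.deleteById(table, key, commitTime); // server commits within commitTime ms
* } else {
*   client.deleteById(table, key);
*   client.commit(table);                      // explicit commit per operation
* }
* </pre>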
*/ @Override public Status delete(String table, String key) { try { UpdateResponse response; if (batchMode) { response = client.deleteById(table, key, commitTime); } else { response = client.deleteById(table, key); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result will be stored in a * HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("id:" + key); if (returnFields) { query.setFields(fieldList); } final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); if ((results != null) && (results.getNumFound() > 0)) { for (String field : results.get(0).getFieldNames()) { result.put(field, new StringByteIterator(String.valueOf(results.get(0).getFirstValue(field)))); } } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key, overwriting any existing values with the * same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { SolrInputDocument updatedDoc = new SolrInputDocument(); updatedDoc.addField("id", key); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { updatedDoc.addField(entry.getKey(), Collections.singletonMap("set", entry.getValue())); } UpdateResponse writeResponse; if (batchMode) { writeResponse = client.add(table, updatedDoc, commitTime); } else { writeResponse = client.add(table, updatedDoc); client.commit(table); } return checkStatus(writeResponse.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the * result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. 
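* A worked example of the query this method builds, assuming a hypothetical
* startkey "user100" and recordcount 10:
* <pre>
* q    = *:*
* fq   = id:[ user100 TO * ]
* rows = 10
* </pre>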
*/ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("*:*"); query.setParam("fq", "id:[ " + startkey + " TO * ]"); if (returnFields) { query.setFields(fieldList); } query.setRows(recordcount); final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); HashMap entry; for (SolrDocument hit : results) { entry = new HashMap((int) results.getNumFound()); for (String field : hit.getFieldNames()) { entry.put(field, new StringByteIterator(String.valueOf(hit.getFirstValue(field)))); } result.add(entry); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } private Status checkStatus(int status) { Status responseStatus; switch (status) { case 0: responseStatus = Status.OK; break; case 400: responseStatus = Status.BAD_REQUEST; break; case 403: responseStatus = Status.FORBIDDEN; break; case 404: responseStatus = Status.NOT_FOUND; break; case 500: responseStatus = Status.ERROR; break; case 503: responseStatus = Status.SERVICE_UNAVAILABLE; break; default: responseStatus = Status.UNEXPECTED_STATE; break; } return responseStatus; } } diff --git a/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java b/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java index 50346b78..3affce37 100644 --- a/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java +++ b/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java @@ -1,331 +1,331 @@ /** * Copyright (c) 2016 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db.solr6; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.StringByteIterator; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.impl.CloudSolrClient; import org.apache.solr.client.solrj.impl.HttpClientUtil; import org.apache.solr.client.solrj.impl.HttpSolrClient; import org.apache.solr.client.solrj.impl.Krb5HttpClientConfigurer; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.client.solrj.response.UpdateResponse; import org.apache.solr.client.solrj.SolrServerException; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrInputDocument; import java.io.IOException; import java.util.*; import java.util.Map.Entry; /** * Solr client for YCSB framework. * *
* Default properties to set: * See README.md
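* A minimal sketch of the SolrJ 6 client construction performed in init()
* below (the host values are illustrative):
* <pre>
* SolrClient cloud = new CloudSolrClient.Builder()
*     .withZkHost(Arrays.asList("zk1:2181", "zk2:2181")).build();
* SolrClient standalone =
*     new HttpSolrClient.Builder("http://localhost:8983/solr").build();
* </pre>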
* */ public class SolrClient extends DB { public static final String DEFAULT_CLOUD_MODE = "false"; public static final String DEFAULT_BATCH_MODE = "false"; public static final String DEFAULT_ZOOKEEPER_HOSTS = "localhost:2181"; public static final String DEFAULT_SOLR_BASE_URL = "http://localhost:8983/solr"; public static final String DEFAULT_COMMIT_WITHIN_TIME = "1000"; private org.apache.solr.client.solrj.SolrClient client; private Integer commitTime; private Boolean batchMode; /** * Initialize any state for this DB. Called once per DB instance; there is one DB instance per * client thread. */ @Override public void init() throws DBException { Properties props = getProperties(); commitTime = Integer .parseInt(props.getProperty("solr.commit.within.time", DEFAULT_COMMIT_WITHIN_TIME)); batchMode = Boolean.parseBoolean(props.getProperty("solr.batch.mode", DEFAULT_BATCH_MODE)); String jaasConfPath = props.getProperty("solr.jaas.conf.path"); if(jaasConfPath != null) { System.setProperty("java.security.auth.login.config", jaasConfPath); HttpClientUtil.setConfigurer(new Krb5HttpClientConfigurer()); } // Check if Solr cluster is running in SolrCloud or Stand-alone mode Boolean cloudMode = Boolean.parseBoolean(props.getProperty("solr.cloud", DEFAULT_CLOUD_MODE)); System.err.println("Solr Cloud Mode = " + cloudMode); if (cloudMode) { System.err.println("Solr Zookeeper Remote Hosts = " + props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS)); client = new CloudSolrClient.Builder().withZkHost( Arrays.asList(props.getProperty("solr.zookeeper.hosts", DEFAULT_ZOOKEEPER_HOSTS).split(","))).build(); } else { client = new HttpSolrClient.Builder(props.getProperty("solr.base.url", DEFAULT_SOLR_BASE_URL)).build(); } } @Override public void cleanup() throws DBException { try { client.close(); } catch (IOException e) { throw new DBException(e); } } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", key); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { doc.addField(entry.getKey(), entry.getValue()); } UpdateResponse response; if (batchMode) { response = client.add(table, doc, commitTime); } else { response = client.add(table, doc); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. 
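* For contrast with insert(), a sketch of the atomic-update document built
* by update() further below (field name and value are illustrative):
* <pre>
* SolrInputDocument doc = new SolrInputDocument();
* doc.addField("id", "user100");
* doc.addField("field0", Collections.singletonMap("set", "new-value"));
* client.add(table, doc, commitTime); // Solr applies the {"set": ...} atomically
* </pre>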
*/ @Override public Status delete(String table, String key) { try { UpdateResponse response; if (batchMode) { response = client.deleteById(table, key, commitTime); } else { response = client.deleteById(table, key); client.commit(table); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result will be stored in a * HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("id:" + key); if (returnFields) { query.setFields(fieldList); } final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); if ((results != null) && (results.getNumFound() > 0)) { for (String field : results.get(0).getFieldNames()) { result.put(field, new StringByteIterator(String.valueOf(results.get(0).getFirstValue(field)))); } } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be * written into the record with the specified record key, overwriting any existing values with the * same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { SolrInputDocument updatedDoc = new SolrInputDocument(); updatedDoc.addField("id", key); for (Entry entry : StringByteIterator.getStringMap(values).entrySet()) { updatedDoc.addField(entry.getKey(), Collections.singletonMap("set", entry.getValue())); } UpdateResponse writeResponse; if (batchMode) { writeResponse = client.add(table, updatedDoc, commitTime); } else { writeResponse = client.add(table, updatedDoc); client.commit(table); } return checkStatus(writeResponse.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the * result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error. See this class's description for a * discussion of error codes. 
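* A sketch of how each hit is marshalled into the YCSB result vector,
* mirroring the loop in the method body below:
* <pre>
* for (SolrDocument hit : results) {
*   HashMap&lt;String, ByteIterator&gt; entry = new HashMap&lt;&gt;();
*   for (String field : hit.getFieldNames()) {
*     entry.put(field,
*         new StringByteIterator(String.valueOf(hit.getFirstValue(field))));
*   }
*   result.add(entry);
* }
* </pre>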
*/ @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { try { Boolean returnFields = false; String[] fieldList = null; if (fields != null) { returnFields = true; fieldList = fields.toArray(new String[fields.size()]); } SolrQuery query = new SolrQuery(); query.setQuery("*:*"); query.setParam("fq", "id:[ " + startkey + " TO * ]"); if (returnFields) { query.setFields(fieldList); } query.setRows(recordcount); final QueryResponse response = client.query(table, query); SolrDocumentList results = response.getResults(); HashMap entry; for (SolrDocument hit : results) { entry = new HashMap<>((int) results.getNumFound()); for (String field : hit.getFieldNames()) { entry.put(field, new StringByteIterator(String.valueOf(hit.getFirstValue(field)))); } result.add(entry); } return checkStatus(response.getStatus()); } catch (IOException | SolrServerException e) { e.printStackTrace(); } return Status.ERROR; } private Status checkStatus(int status) { Status responseStatus; switch (status) { case 0: responseStatus = Status.OK; break; case 400: responseStatus = Status.BAD_REQUEST; break; case 403: responseStatus = Status.FORBIDDEN; break; case 404: responseStatus = Status.NOT_FOUND; break; case 500: responseStatus = Status.ERROR; break; case 503: responseStatus = Status.SERVICE_UNAVAILABLE; break; default: responseStatus = Status.UNEXPECTED_STATE; break; } return responseStatus; } } diff --git a/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java b/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java index e86120e1..8d16b0a8 100644 --- a/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java +++ b/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java @@ -1,152 +1,152 @@ /** * Copyright (c) 2014 - 2016 YCSB Contributors. All rights reserved. *
* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at *
* http://www.apache.org/licenses/LICENSE-2.0 *
* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import com.yahoo.ycsb.*; import org.tarantool.TarantoolConnection16; import org.tarantool.TarantoolConnection16Impl; import org.tarantool.TarantoolException; import java.util.*; import java.util.logging.Level; import java.util.logging.Logger; /** * YCSB binding for Tarantool. */ public class TarantoolClient extends DB { private static final Logger LOGGER = Logger.getLogger(TarantoolClient.class.getName()); private static final String HOST_PROPERTY = "tarantool.host"; private static final String PORT_PROPERTY = "tarantool.port"; private static final String SPACE_PROPERTY = "tarantool.space"; private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3301"; private static final String DEFAULT_SPACE = "1024"; private TarantoolConnection16 connection; private int spaceNo; public void init() throws DBException { Properties props = getProperties(); int port = Integer.parseInt(props.getProperty(PORT_PROPERTY, DEFAULT_PORT)); String host = props.getProperty(HOST_PROPERTY, DEFAULT_HOST); spaceNo = Integer.parseInt(props.getProperty(SPACE_PROPERTY, DEFAULT_SPACE)); try { this.connection = new TarantoolConnection16Impl(host, port); } catch (Exception exc) { throw new DBException("Can't initialize Tarantool connection", exc); } } public void cleanup() throws DBException { this.connection.close(); } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { return replace(key, values, "Can't insert element"); } private HashMap tupleConvertFilter(List input, Set fields) { HashMap result = new HashMap<>(); if (input == null) { return result; } for (int i = 1; i < input.size(); i += 2) { if (fields == null || fields.contains(input.get(i))) { result.put(input.get(i), new StringByteIterator(input.get(i + 1))); } } return result; } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { List response = this.connection.select(this.spaceNo, 0, Arrays.asList(key), 0, 1, 0); // populate the caller's map instead of reassigning the parameter result.putAll(tupleConvertFilter(response, fields)); return Status.OK; } catch (TarantoolException exc) { LOGGER.log(Level.SEVERE, "Can't select element", exc); return Status.ERROR; } catch (NullPointerException exc) { return Status.ERROR; } } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { List> response; try { response = this.connection.select(this.spaceNo, 0, Arrays.asList(startkey), 0, recordcount, 6); } catch (TarantoolException exc) { LOGGER.log(Level.SEVERE, "Can't select range elements", exc); return Status.ERROR; } catch (NullPointerException exc) { return Status.ERROR; } for (List i : response) { HashMap temp = tupleConvertFilter(i, fields); if (!temp.isEmpty()) { result.add((HashMap) temp.clone()); } } return Status.OK; } @Override public Status delete(String table, String key) { try { this.connection.delete(this.spaceNo, Collections.singletonList(key)); } catch (TarantoolException exc) { LOGGER.log(Level.SEVERE, "Can't delete element", exc); return Status.ERROR; }
catch (NullPointerException e) { return Status.ERROR; } return Status.OK; } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { return replace(key, values, "Can't replace element"); } - private Status replace(String key, HashMap values, String exceptionDescription) { + private Status replace(String key, Map values, String exceptionDescription) { int j = 0; String[] tuple = new String[1 + 2 * values.size()]; tuple[0] = key; for (Map.Entry i : values.entrySet()) { tuple[j + 1] = i.getKey(); tuple[j + 2] = i.getValue().toString(); j += 2; } try { this.connection.replace(this.spaceNo, tuple); } catch (TarantoolException exc) { LOGGER.log(Level.SEVERE, exceptionDescription, exc); return Status.ERROR; } return Status.OK; } } diff --git a/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java b/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java index a4132b2f..4bc716cb 100644 --- a/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java +++ b/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java @@ -1,172 +1,172 @@ /** * Copyright (c) 2012 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You * may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. See the License for the specific language governing * permissions and limitations under the License. See accompanying * LICENSE file. */ package com.yahoo.ycsb.db; import java.util.HashMap; import java.util.Set; import java.util.Vector; import java.util.Map.Entry; import org.apache.log4j.Logger; import voldemort.client.ClientConfig; import voldemort.client.SocketStoreClientFactory; import voldemort.client.StoreClient; import voldemort.versioning.VectorClock; import voldemort.versioning.Versioned; import com.yahoo.ycsb.DB; import com.yahoo.ycsb.DBException; import com.yahoo.ycsb.Status; import com.yahoo.ycsb.ByteIterator; import com.yahoo.ycsb.StringByteIterator; /** * YCSB binding for * Voldemort. */ public class VoldemortClient extends DB { private static final Logger LOGGER = Logger.getLogger(VoldemortClient.class); private StoreClient> storeClient; private SocketStoreClientFactory socketFactory; private String storeName; /** * Initialize the DB layer. This accepts all properties allowed by the * Voldemort client. A store maps to a table. 
Required property: bootstrap_urls * Additional property: store_name -> preloaded once; should be the same as the -t table name * {@link ClientConfig} */ public void init() throws DBException { ClientConfig clientConfig = new ClientConfig(getProperties()); socketFactory = new SocketStoreClientFactory(clientConfig); // Retrieve store name storeName = getProperties().getProperty("store_name", "usertable"); // Use store name to retrieve client storeClient = socketFactory.getStoreClient(storeName); if (storeClient == null) { throw new DBException("Unable to instantiate store client"); } } public void cleanup() throws DBException { socketFactory.close(); } @Override public Status delete(String table, String key) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } if (storeClient.delete(key)) { return Status.OK; } return Status.ERROR; } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } storeClient.put(key, (HashMap) StringByteIterator.getStringMap(values)); return Status.OK; } @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } Versioned> versionedValue = storeClient.get(key); if (versionedValue == null) { return Status.NOT_FOUND; } if (fields != null) { for (String field : fields) { String val = versionedValue.getValue().get(field); if (val != null) { result.put(field, new StringByteIterator(val)); } } } else { StringByteIterator.putAllAsByteIterators(result, versionedValue.getValue()); } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { LOGGER.warn("Voldemort does not support Scan semantics"); return Status.OK; } @Override public Status update(String table, String key, - HashMap values) { + Map values) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } Versioned> versionedValue = storeClient.get(key); HashMap value = new HashMap(); VectorClock version; if (versionedValue != null) { version = ((VectorClock) versionedValue.getVersion()).incremented(0, 1); value = versionedValue.getValue(); for (Entry entry : values.entrySet()) { value.put(entry.getKey(), entry.getValue().toString()); } } else { version = new VectorClock(); StringByteIterator.putAllAsStrings(value, values); } storeClient.put(key, Versioned.value(value, version)); return Status.OK; } private Status checkStore(String table) { if (table.compareTo(storeName) != 0) { try { storeClient = socketFactory.getStoreClient(table); if (storeClient == null) { LOGGER.error("Could not instantiate store client for " + table); return Status.ERROR; } storeName = table; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } }
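The Tarantool binding above flattens every YCSB record into a single flat tuple: the key at index 0, followed by alternating field names and values. A self-contained sketch of that encoding and of the decoding done by tupleConvertFilter() (plain Java, no Tarantool dependency; the class and method names are illustrative only):

import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class TupleLayoutSketch {
  // encode: mirrors replace() above — [key, field1, value1, field2, value2, ...]
  static String[] encode(String key, Map<String, String> values) {
    String[] tuple = new String[1 + 2 * values.size()];
    tuple[0] = key;
    int j = 1;
    for (Map.Entry<String, String> e : values.entrySet()) {
      tuple[j++] = e.getKey();
      tuple[j++] = e.getValue();
    }
    return tuple;
  }

  // decode: mirrors tupleConvertFilter() above — skip the key at index 0,
  // then read alternating name/value pairs
  static Map<String, String> decode(List<String> tuple) {
    Map<String, String> result = new HashMap<>();
    for (int i = 1; i + 1 < tuple.size(); i += 2) {
      result.put(tuple.get(i), tuple.get(i + 1));
    }
    return result;
  }

  public static void main(String[] args) {
    Map<String, String> values = new LinkedHashMap<>();
    values.put("field0", "a");
    values.put("field1", "b");
    String[] tuple = encode("user1", values);
    System.out.println(Arrays.toString(tuple));        // [user1, field0, a, field1, b]
    System.out.println(decode(Arrays.asList(tuple)));  // {field0=a, field1=b}
  }
}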