
Commit

Merge pull request #37 from SnappyDataInc/SNAP-618
SNAP-618: update to hbase 0.98.X to be compatible with Spark's version
Sumedh Wale committed Mar 27, 2016
2 parents 2ae3c12 + 99be188 commit 410123e
Showing 25 changed files with 199 additions and 103 deletions.
8 changes: 4 additions & 4 deletions build.gradle
@@ -49,7 +49,7 @@ allprojects {
jerseyVersion = '1.9'
hadoopJettyVersion = '6.1.26'
jsr305Version = '1.3.9'
hbaseVersion = '0.94.27'
hbaseVersion = '0.98.17-hadoop2'
derbyVersion = '10.10.2.0'
//hbaseVersion = '0.94.4-gemfire-r45047'
//hadoopVersion = '2.2.0-gphd-3.1.0.0'
@@ -800,8 +800,8 @@ def includeJar(def jarFile) {
return jarName.contains('jetty') || jarName.contains('spring') ||
jarName.contains('hadoop') || jarName.contains('protobuf') ||
jarName.contains('jersey') || jarName.contains('jetty') ||
jarName.contains('hbase') || jarName.contains('jsr305') ||
jarName.contains('pxf') || jarName.contains('jline')
jarName.contains('jsr305') || jarName.contains('pxf') ||
jarName.contains('jline')
}

// pack the entire GemFireXD product tree
@@ -966,4 +966,4 @@ task precheckin {
if (project.hasProperty('gfxd')) {
dependsOn cleanAll, buildAll, check
}
}
}
28 changes: 27 additions & 1 deletion gemfire-core/build.gradle
@@ -58,9 +58,35 @@ dependencies {
provided "com.sun.jersey:jersey-server:${jerseyVersion}"
provided "org.mortbay.jetty:jetty:${hadoopJettyVersion}"
provided "org.mortbay.jetty:jetty-util:${hadoopJettyVersion}"
provided "org.apache.hbase:hbase:${hbaseVersion}"
provided "com.google.code.findbugs:jsr305:${jsr305Version}"

compile group: 'org.apache.hbase', name: 'hbase-protocol', version: hbaseVersion
compile(group: 'org.apache.hbase', name: 'hbase-common', version: hbaseVersion) {
exclude(group: 'org.apache.hbase', module: 'hbase-annotations')
}
compile(group: 'org.apache.hbase', name: 'hbase-client', version: hbaseVersion) {
exclude(group: 'org.apache.hbase', module: 'hbase-annotations')
exclude(group: 'io.netty', module: 'netty')
}
compile(group: 'org.apache.hbase', name: 'hbase-server', version: hbaseVersion) {
exclude(group: 'org.apache.hbase', module: 'hbase-annotations')
exclude(group: 'org.apache.hadoop', module: 'hadoop-core')
exclude(group: 'org.apache.hadoop', module: 'hadoop-client')
exclude(group: 'org.apache.hadoop', module: 'hadoop-mapreduce-client-jobclient')
exclude(group: 'org.apache.hadoop', module: 'hadoop-mapreduce-client-core')
exclude(group: 'org.apache.hadoop', module: 'hadoop-auth')
exclude(group: 'org.apache.hadoop', module: 'hadoop-annotations')
exclude(group: 'org.apache.hadoop', module: 'hadoop-hdfs')
exclude(group: 'org.apache.hbase', module: 'hbase-hadoop1-compat')
exclude(group: 'commons-math', module: 'commons-math')
exclude(group: 'org.slf4j', module: 'slf4j-api')
exclude(group: 'com.sun.jersey', module: 'jersey-server')
exclude(group: 'com.sun.jersey', module: 'jersey-core')
exclude(group: 'com.sun.jersey', module: 'jersey-json')
exclude(group: 'commons-io', module: 'commons-io')
}
compile 'org.cloudera.htrace:htrace-core:2.05'

compile 'com.google.guava:guava:14.0.1'
compile 'xml-apis:xml-apis:1.4.01'
compile('xom:xom:1.2.5') {
Expand Down
@@ -54,7 +54,6 @@
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.ConnectTimeoutException;
@@ -68,6 +67,8 @@
*/
public class HDFSStoreImpl implements HDFSStore {
public static final String ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP = "gemfire.ALLOW_STANDALONE_HDFS_FILESYSTEM";
public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;

private final boolean ALLOW_TEST_FILE_SYSTEM = Boolean.getBoolean(ALLOW_STANDALONE_HDFS_FILESYSTEM_PROP);
final LogWriterI18n logger;

@@ -125,7 +126,7 @@ public HDFSStoreImpl(String name, final HDFSStore config) {
// this.blockCache = new LruBlockCache(cacheSize,
// StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf, HFileSortedOplogFactory.convertStatistics(stats));
this.blockCache = new LruBlockCache(cacheSize,
StoreFile.DEFAULT_BLOCKSIZE_SMALL, hconf);
DEFAULT_BLOCKSIZE_SMALL, hconf);
} else {
this.blockCache = null;
}
@@ -205,7 +206,8 @@ private FileSystem createFileSystem(Configuration hconf, String configFile, bool
new String[]{"org.apache.hadoop.io.serializer.WritableSerialization"});
// create writer

SchemaMetrics.configureGlobally(hconf);
// [sumedh] should not be required with the new metrics2
// SchemaMetrics.configureGlobally(hconf);

String nameNodeURL = null;
if ((nameNodeURL = getNameNodeURL()) == null) {
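
The HDFSStoreImpl changes above adapt to two 0.98 API removals: StoreFile.DEFAULT_BLOCKSIZE_SMALL is gone, so the commit inlines the old 8 KB value as a local constant, and SchemaMetrics was dropped in favor of HBase's metrics2 framework, so its configuration call is commented out. A minimal sketch of the resulting block-cache setup, using only the constructor visible in the diff (the wrapper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

    class BlockCacheSetup {
      // 0.94 exposed this as StoreFile.DEFAULT_BLOCKSIZE_SMALL; 0.98 removed
      // it, so HDFSStoreImpl now defines the same 8 KB value itself.
      static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;

      static BlockCache create(long cacheSize, Configuration hconf) {
        // Same call shape as HDFSStoreImpl after this change.
        return new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL, hconf);
      }
    }
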
@@ -35,6 +35,7 @@
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreImpl;
import com.gemstone.gemfire.cache.hdfs.internal.cardinality.HyperLogLog;
import com.gemstone.gemfire.cache.hdfs.internal.cardinality.ICardinality;
import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
import com.gemstone.gemfire.internal.cache.persistence.soplog.DelegatingSerializedComparator;
import com.gemstone.gemfire.internal.cache.persistence.soplog.HFileStoreStatistics;
import com.gemstone.gemfire.internal.cache.persistence.soplog.SortedOplogStatistics;
@@ -47,16 +48,18 @@
import com.gemstone.gemfire.internal.util.SingletonValue.SingletonBuilder;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.BloomFilterWriter;
import org.apache.hadoop.ipc.RemoteException;
@@ -215,12 +218,15 @@ public HFileSortedOplogWriter(int keys) throws IOException {
Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
HoplogConfig.COMPRESSION_DEFAULT));

// ByteComparator bc = new ByteComparator();
writer = HFile.getWriterFactory(conf, cacheConf)
.withPath(fsProvider.getFS(), path)
ByteComparator bc = new ByteComparator();
HFileContext hcontext = new HFileContextBuilder()
.withBlockSize(hfileBlockSize)
// .withComparator(bc)
.withCompression(compress)
.build();
writer = HFile.getWriterFactory(conf, cacheConf)
.withPath(fsProvider.getFS(), path)
.withFileContext(hcontext)
.withComparator(bc)
.create();
bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
writer);
@@ -382,11 +388,11 @@ class HFileReader implements HoplogReader, Closeable {
private final Map<byte[], byte[]> fileInfo;
private final HyperLogLog estimator;
private final FileSystem previousFS;

public HFileReader() throws IOException {
try {
FileSystem fs = fsProvider.getFS();
reader = HFile.createReader(fs, path, cacheConf);
reader = HFile.createReader(fs, path, cacheConf, conf);
fileInfo = reader.loadFileInfo();
closed = new AtomicBoolean(false);

@@ -727,7 +733,8 @@ public HFileSortedIterator(HFileScanner scan, byte[] from, boolean fromInclusive
}

assert from == null || to == null
|| scan.getReader().getComparator().compare(from, to) <= 0;
|| scan.getReader().getComparator().compare(
from, 0, from.length, to, 0, to.length) <= 0;

initIterator();
}
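
Taken together, this file's diff captures the core HFile API migration from 0.94 to 0.98: Compression.Algorithm moved from org.apache.hadoop.hbase.io.hfile to org.apache.hadoop.hbase.io.compress, BloomType moved from StoreFile into org.apache.hadoop.hbase.regionserver, per-file settings such as block size and compression are now carried by an HFileContext built with HFileContextBuilder, HFile.createReader takes a trailing Configuration argument, and the reader's comparator compares with explicit offsets and lengths. A condensed sketch of the new write and read paths, assuming (as the diff does) that GemFire's ByteComparator satisfies the comparator type withComparator expects:

    import java.io.IOException;

    import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

    class HFileApiSketch {
      static HFile.Writer openWriter(Configuration conf, CacheConfig cacheConf,
          FileSystem fs, Path path, int blockSize, Algorithm compress)
          throws IOException {
        // 0.98 collects per-file settings into an immutable HFileContext.
        HFileContext hcontext = new HFileContextBuilder()
            .withBlockSize(blockSize)
            .withCompression(compress)
            .build();
        return HFile.getWriterFactory(conf, cacheConf)
            .withPath(fs, path)
            .withFileContext(hcontext)
            .withComparator(new ByteComparator())
            .create();
      }

      static HFile.Reader openReader(Configuration conf, CacheConfig cacheConf,
          FileSystem fs, Path path) throws IOException {
        // 0.98 added the trailing Configuration parameter to createReader.
        return HFile.createReader(fs, path, cacheConf, conf);
      }
    }
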
@@ -24,7 +24,6 @@
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;

/**
* An iterator that iterates over a split in a read/write hoplog
@@ -38,7 +37,8 @@ public RWSplitIterator(FileSystem fs, Path[] path, long[] start, long[] len, lon

@Override
protected AbstractHoplog getHoplog(FileSystem fs, Path path) throws IOException {
SchemaMetrics.configureGlobally(fs.getConf());
// [sumedh] should not be required with the new metrics2
// SchemaMetrics.configureGlobally(fs.getConf());
return HFileSortedOplog.getHoplogForLoner(fs, path);
}

@@ -37,8 +37,7 @@
*
* @author bakera
*/
public class ArraySerializedComparator implements CompositeSerializedComparator,
DelegatingSerializedComparator {
public class ArraySerializedComparator extends DelegatingSerializedComparator {

/** the comparators */
private volatile SerializedComparator[] comparators;
@@ -52,11 +51,6 @@ public class ArraySerializedComparator implements CompositeSerializedComparator,
public void setComparators(SerializedComparator[] comparators) {
this.comparators = comparators;
}

@Override
public int compare(byte[] o1, byte[] o2) {
return compare(o1, 0, o1.length, o2, 0, o2.length);
}

@Override
public int compare(byte[] b1, int o1, int l1, byte[] b2, int o2, int l2) {
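
The comparator files that follow share one refactor: the comparator interfaces become abstract classes, and the identical compare(byte[], byte[]) overrides deleted here (and below in IndexSerializedComparator and LexicographicalComparator) move into the base type. SerializedComparator's own diff is outside this excerpt, so the following is an illustrative sketch of the pattern rather than the project's actual base class:

    // Illustrative only: an abstract root can supply the whole-array overload
    // once, which is why each subclass's copy of it disappears in these diffs.
    abstract class AbstractByteArrayComparator {
      /** Subclasses implement only the offset-and-length form. */
      public abstract int compare(byte[] b1, int o1, int l1,
          byte[] b2, int o2, int l2);

      /** Shared convenience overload, formerly duplicated per subclass. */
      public int compare(byte[] o1, byte[] o2) {
        return compare(o1, 0, o1.length, o2, 0, o2.length);
      }
    }
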
@@ -23,24 +23,20 @@
/**
* Compares objects byte-by-byte. This is fast and sufficient for cases when
* lexicographic ordering is not important or the serialization is order-
* preserving.
*
* preserving.
*
* @author bakera
*/
public class ByteComparator implements SerializedComparator {
@Override
public int compare(byte[] rhs, byte[] lhs) {
return compare(rhs, 0, rhs.length, lhs, 0, lhs.length);
}
public class ByteComparator extends SerializedComparator {

@Override
public int compare(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
return compareBytes(r, rOff, rLen, l, lOff, lLen);
}

/**
* Compares two byte arrays element-by-element.
*
*
* @param r the right array
* @param rOff the offset of r
* @param rLen the length of r to compare
@@ -49,8 +45,8 @@ public int compare(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
* @param lLen the length of l to compare
* @return -1 if r < l; 0 if r == l; 1 if r > 1
*/

public static int compareBytes(byte[] r, int rOff, int rLen, byte[] l, int lOff, int lLen) {
public static int compareBytes(byte[] r, int rOff, int rLen, byte[] l,
int lOff, int lLen) {
return Bytes.compareTo(r, rOff, rLen, l, lOff, lLen);
}
}
@@ -22,36 +22,39 @@

/**
* Creates and compares composite keys.
*
*
* @author bakera
*/
public interface CompositeSerializedComparator extends SerializedComparator {
public abstract class CompositeSerializedComparator
extends SerializedComparator {

/**
* Constructs a composite key consisting of a primary key and a secondary key.
*
*
* @param key1 the primary key
* @param key2 the secondary key
* @return the composite key
*/
public byte[] createCompositeKey(byte[] key1, byte[] key2);
public abstract byte[] createCompositeKey(byte[] key1, byte[] key2);

/**
* Constructs a composite key by combining the supplied keys. The number of
* keys and their order must match the comparator set.
* <p>
* The <code>WILDCARD_KEY</code> token may be used to match all subkeys in the
* given ordinal position. This is useful when constructing a search key to
* retrieve all keys for a given primary key, ignoring the remaining subkeys.
*
*
* @param keys the keys, ordered by sort priority
* @return the composite key
*/
public byte[] createCompositeKey(byte[]... keys);
public abstract byte[] createCompositeKey(byte[]... keys);

/**
* Returns subkey for the given ordinal position.
*
* @param key the composite key
* @return the subkey
*/
public ByteBuffer getKey(ByteBuffer key, int ordinal);
public abstract ByteBuffer getKey(ByteBuffer key, int ordinal);
}
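
For reference, a hedged usage sketch of the composite-key API above. The method names come from the diff; the comparator instance and the WILDCARD_KEY token are supplied by the caller because their definitions are outside this excerpt, and the package location is inferred from this commit's soplog imports:

    import java.nio.ByteBuffer;

    import com.gemstone.gemfire.internal.cache.persistence.soplog.CompositeSerializedComparator;

    class CompositeKeyUsage {
      // Builds a search key that matches every secondary subkey of one
      // primary key, per the WILDCARD_KEY contract documented above.
      static byte[] searchKey(CompositeSerializedComparator comp,
          byte[] primaryKey, byte[] wildcardToken) {
        return comp.createCompositeKey(primaryKey, wildcardToken);
      }

      // Extracts the subkey at a given ordinal position from a composite key.
      static ByteBuffer subkey(CompositeSerializedComparator comp,
          byte[] compositeKey, int ordinal) {
        return comp.getKey(ByteBuffer.wrap(compositeKey), ordinal);
      }
    }
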
@@ -20,19 +20,23 @@

/**
* Delegates object comparisons to one or more embedded comparators.
*
*
* @author bakera
*/
public interface DelegatingSerializedComparator extends SerializedComparator {
public abstract class DelegatingSerializedComparator
extends CompositeSerializedComparator {

/**
* Injects the embedded comparators.
*
* @param comparators the comparators for delegation
*/
void setComparators(SerializedComparator[] comparators);
public abstract void setComparators(SerializedComparator[] comparators);

/**
* Returns the embedded comparators.
*
* @return the comparators
*/
SerializedComparator[] getComparators();
public abstract SerializedComparator[] getComparators();
}
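
A usage sketch of this delegation contract: ArraySerializedComparator above provides setComparators, so a multi-part key comparator can be assembled from per-subkey comparators. The no-arg constructor is an assumption not shown in this excerpt, and package locations are inferred from the commit's imports:

    import com.gemstone.gemfire.internal.cache.persistence.soplog.ArraySerializedComparator;
    import com.gemstone.gemfire.internal.cache.persistence.soplog.ByteComparator;
    import com.gemstone.gemfire.internal.cache.persistence.soplog.SerializedComparator;

    class DelegationUsage {
      static ArraySerializedComparator twoPartKeyComparator() {
        ArraySerializedComparator comp = new ArraySerializedComparator();
        // Sort priority follows array order: the first delegate compares the
        // primary subkey, the second breaks ties on the secondary subkey.
        comp.setComparators(new SerializedComparator[] {
            new ByteComparator(), new ByteComparator() });
        return comp;
      }
    }
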
@@ -34,9 +34,8 @@
*
* @author bakera
*/
public class IndexSerializedComparator implements CompositeSerializedComparator,
DelegatingSerializedComparator {

public class IndexSerializedComparator extends DelegatingSerializedComparator {

private volatile SerializedComparator primary;
private volatile SerializedComparator secondary;

@@ -58,11 +57,6 @@ public SerializedComparator[] getComparators() {
return new SerializedComparator[] { primary, secondary };
}

@Override
public int compare(byte[] o1, byte[] o2) {
return compare(o1, 0, o1.length, o2, 0, o2.length);
}

@Override
public int compare(byte[] b1, int o1, int l1, byte[] b2, int o2, int l2) {
int klen1 = Bytes.getVarInt(b1, o1);
@@ -53,7 +53,7 @@
*
* @author bakera
*/
public class LexicographicalComparator implements SerializedComparator {
public class LexicographicalComparator extends SerializedComparator {

//////////////////////////////////////////////////////////////////////////////
//
@@ -113,12 +113,7 @@ public class LexicographicalComparator implements SerializedComparator {
private static final int STRING_TO_STRING_BYTES = DSCODE.STRING << 8 | DSCODE.STRING_BYTES;
private static final int STRING_BYTES_TO_STRING = DSCODE.STRING_BYTES << 8 | DSCODE.STRING;
private static final int STRING_BYTES_TO_STRING_BYTES = DSCODE.STRING_BYTES << 8 | DSCODE.STRING_BYTES;

@Override
public int compare(byte[] o1, byte[] o2) {
return compare(o1, 0, o1.length, o2, 0, o2.length);
}


@Override
public int compare(byte[] b1, int o1, int l1, byte[] b2, int o2, int l2) {
byte type1 = b1[o1];
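
One technique worth noting in the surviving lines above: LexicographicalComparator dispatches on the DSCODE type tags of both operands by packing the two bytes into a single int (type1 << 8 | type2) and branching on the pair. A minimal illustration with placeholder tag values (GemFire's real ones live in DSCODE, outside this excerpt):

    class TypeTagDispatch {
      static final byte STRING = 1;       // placeholder, not the real DSCODE
      static final byte STRING_BYTES = 2; // placeholder, not the real DSCODE

      static final int STRING_TO_STRING_BYTES = STRING << 8 | STRING_BYTES;

      static int compareTyped(byte[] b1, int o1, byte[] b2, int o2) {
        // The first serialized byte of each operand is its type tag.
        int pair = b1[o1] << 8 | b2[o2];
        if (pair == STRING_TO_STRING_BYTES) {
          return 0; // type-specific comparison would go here
        }
        throw new IllegalArgumentException("unhandled type pair: " + pair);
      }
    }
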