org.apache.hadoop.conf.Configuration → com.salesforce.phoenix.util.ReadOnlyProps



Project forcedotcom/phoenix in file ...iterate.DefaultParallelIteratorRegionSplitter.java (2013-06-06)
@@ -31,7 +31,6 @@ import java.sql.SQLException;
 import java.util.*;
 import java.util.Map.Entry;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -43,6 +42,7 @@ import com.salesforce.phoenix.compile.StatementContext;
 import com.salesforce.phoenix.parse.HintNode.Hint;
 import com.salesforce.phoenix.query.*;
 import com.salesforce.phoenix.schema.TableRef;
+import com.salesforce.phoenix.util.ReadOnlyProps;
 
 
 /**
@@ -67,14 +67,14 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
     protected DefaultParallelIteratorRegionSplitter(StatementContext context, TableRef table) {
         this.context = context;
         this.table = table;
-        Configuration config = context.getConnection().getQueryServices().getConfig();
-        this.targetConcurrency = config.getInt(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB,
+        ReadOnlyProps props = context.getConnection().getQueryServices().getProps();
+        this.targetConcurrency = props.getInt(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB,
                 QueryServicesOptions.DEFAULT_TARGET_QUERY_CONCURRENCY);
-        this.maxConcurrency = config.getInt(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB,
+        this.maxConcurrency = props.getInt(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB,
                 QueryServicesOptions.DEFAULT_MAX_QUERY_CONCURRENCY);
         Preconditions.checkArgument(targetConcurrency >= 1, "Invalid target concurrency: " + targetConcurrency);
         Preconditions.checkArgument(maxConcurrency >= targetConcurrency , "Invalid max concurrency: " + maxConcurrency);
-        this.maxIntraRegionParallelization = context.hasHint(Hint.NO_INTRA_REGION_PARALLELIZATION) ? 1 : config.getInt(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB,
+        this.maxIntraRegionParallelization = context.hasHint(Hint.NO_INTRA_REGION_PARALLELIZATION) ? 1 : props.getInt(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB,
                 QueryServicesOptions.DEFAULT_MAX_INTRA_REGION_PARALLELIZATION);
         Preconditions.checkArgument(maxIntraRegionParallelization >= 1 , "Invalid max intra region parallelization: " + maxIntraRegionParallelization);
     }
Project forcedotcom/phoenix in file ...alesforce.phoenix.query.BaseQueryServicesImpl.java (2013-06-06)
@@ -29,11 +29,10 @@ package com.salesforce.phoenix.query;
 
 import java.util.concurrent.ExecutorService;
 
-import org.apache.hadoop.conf.Configuration;
-
 import com.salesforce.phoenix.job.JobManager;
 import com.salesforce.phoenix.memory.GlobalMemoryManager;
 import com.salesforce.phoenix.memory.MemoryManager;
+import com.salesforce.phoenix.util.ReadOnlyProps;
 
 
 
@@ -47,10 +46,9 @@ import com.salesforce.phoenix.memory.MemoryManager;
 public abstract class BaseQueryServicesImpl implements QueryServices {
     private final ExecutorService executor;
     private final MemoryManager memoryManager;
-    private final Configuration config;
+    private final ReadOnlyProps props;
     
     public BaseQueryServicesImpl(QueryServicesOptions options) {
-        this.config = options.getConfiguration();
         this.executor =  JobManager.createThreadPoolExec(
                 options.getKeepAliveMs(), 
                 options.getThreadPoolSize(), 
@@ -58,6 +56,7 @@ public abstract class BaseQueryServicesImpl implements QueryServices {
         this.memoryManager = new GlobalMemoryManager(
                 Runtime.getRuntime().totalMemory() * options.getMaxMemoryPerc() / 100,
                 options.getMaxMemoryWaitMs());
+        this.props = options.getProps();
     }
     
     @Override
@@ -71,8 +70,8 @@ public abstract class BaseQueryServicesImpl implements QueryServices {
     }
 
     @Override
-    public Configuration getConfig() {
-        return config;
+    public final ReadOnlyProps getProps() {
+        return props;
     }
 
     @Override
Project forcedotcom/phoenix in file ...alesforce.phoenix.query.DelegateQueryServices.java (2013-06-06)
@@ -30,9 +30,8 @@ package com.salesforce.phoenix.query;
 import java.sql.SQLException;
 import java.util.concurrent.ExecutorService;
 
-import org.apache.hadoop.conf.Configuration;
-
 import com.salesforce.phoenix.memory.MemoryManager;
+import com.salesforce.phoenix.util.ReadOnlyProps;
 
 
 
@@ -56,11 +55,6 @@ public class DelegateQueryServices implements QueryServices {
     }
     
     @Override
-    public Configuration getConfig() {
-        return parent.getConfig();
-    }
-
-    @Override
     public ExecutorService getExecutor() {
         return parent.getExecutor();
     }
@@ -74,4 +68,9 @@ public class DelegateQueryServices implements QueryServices {
     public void close() throws SQLException {
         parent.close();
     }
+
+    @Override
+    public ReadOnlyProps getProps() {
+        return parent.getProps();
+    }
 }
Project forcedotcom/phoenix in file ...va.com.salesforce.phoenix.query.QueryServices.java (2013-06-06)
@@ -29,10 +29,10 @@ package com.salesforce.phoenix.query;
 
 import java.util.concurrent.ExecutorService;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.http.annotation.Immutable;
 
 import com.salesforce.phoenix.memory.MemoryManager;
+import com.salesforce.phoenix.util.ReadOnlyProps;
 import com.salesforce.phoenix.util.SQLCloseable;
 
 
@@ -155,12 +155,6 @@ public interface QueryServices extends SQLCloseable {
     public static final String CALL_QUEUE_ROUND_ROBIN_ATTRIB = "ipc.server.callqueue.roundrobin";
     public static final String SCAN_CACHE_SIZE_ATTRIB = "hbase.client.scanner.caching";
     public static final String MAX_MUTATION_SIZE_ATTRIB = "phoenix.mutate.maxSize";
-    /**
-     * Use {@link #MUTATE_BATCH_SIZE_ATTRIB} instead
-     * @deprecated
-     */
-    @Deprecated
-    public static final String UPSERT_BATCH_SIZE_ATTRIB = "phoenix.mutate.upsertBatchSize";
     public static final String MUTATE_BATCH_SIZE_ATTRIB = "phoenix.mutate.batchSize";
     public static final String REGION_BOUNDARY_CACHE_TTL_MS_ATTRIB = "phoenix.query.regionBoundaryCacheTTL";
     public static final String MAX_HASH_CACHE_TIME_TO_LIVE_MS = "phoenix.coprocessor.maxHashCacheTimeToLiveMs";
@@ -169,6 +163,12 @@ public interface QueryServices extends SQLCloseable {
 
     public static final String CALL_QUEUE_PRODUCER_ATTRIB_NAME = "CALL_QUEUE_PRODUCER";
     
+    public static final String MASTER_INFO_PORT_ATTRIB = "hbase.master.info.port";
+    public static final String REGIONSERVER_INFO_PORT_ATTRIB = "hbase.regionserver.info.port";
+    public static final String REGIONSERVER_LEASE_PERIOD_ATTRIB = "hbase.regionserver.lease.period";
+    public static final String RPC_TIMEOUT_ATTRIB = "hbase.rpc.timeout";
+
+    
     /**
      * Get executor service used for parallel scans
      */
@@ -179,7 +179,8 @@ public interface QueryServices extends SQLCloseable {
     public MemoryManager getMemoryManager();
     
     /**
-     * Get the configuration
+     * Get the properties from the HBase configuration in a
+     * read-only structure that avoids any synchronization
      */
-    public Configuration getConfig();
+    public ReadOnlyProps getProps();
 }
Project forcedotcom/phoenix in file ...alesforce.phoenix.query.QueryServicesTestImpl.java (2013-06-06)
@@ -29,8 +29,7 @@ package com.salesforce.phoenix.query;
 
 import static com.salesforce.phoenix.query.QueryServicesOptions.withDefaults;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
+import com.salesforce.phoenix.util.ReadOnlyProps;
 
 
 /**
@@ -39,7 +38,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
  * @author jtaylor
  * @since 0.1
  */
-public class QueryServicesTestImpl extends BaseQueryServicesImpl {
+public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
 
     private static final int DEFAULT_THREAD_POOL_SIZE = 8;
     private static final int DEFAULT_QUEUE_SIZE = 0;
@@ -54,12 +53,17 @@ public class QueryServicesTestImpl extends BaseQueryServicesImpl {
     private static final int DEFAULT_TARGET_QUERY_CONCURRENCY = 4;
     private static final int DEFAULT_MAX_QUERY_CONCURRENCY = 8;
     
+    private static final int DEFAULT_MASTER_INFO_PORT = -1;
+    private static final int DEFAULT_REGIONSERVER_INFO_PORT = -1;
+    private static final int DEFAULT_REGIONSERVER_LEASE_PERIOD_MS = 9000000;
+    private static final int DEFAULT_RPC_TIMEOUT_MS = 9000000;
+    
     public QueryServicesTestImpl() {
-        this(HBaseConfiguration.create());
+        this(ReadOnlyProps.EMPTY_PROPS);
     }
     
-    public QueryServicesTestImpl(Configuration config) {
-        this(withDefaults(config)
+    public QueryServicesTestImpl(ReadOnlyProps overrideProps) {
+        super(withDefaults()
                 .setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE)
                 .setQueueSize(DEFAULT_QUEUE_SIZE)
                 .setMaxMemoryPerc(DEFAULT_MAX_MEMORY_PERC)
@@ -71,16 +75,12 @@ public class QueryServicesTestImpl extends BaseQueryServicesImpl {
                 .setTargetQueryConcurrency(DEFAULT_TARGET_QUERY_CONCURRENCY)
                 .setMaxQueryConcurrency(DEFAULT_MAX_QUERY_CONCURRENCY)
                 .setRowKeyOrderSaltedTable(true)
+                .setMaxHashCacheTTLMs(DEFAULT_MAX_HASH_CACHE_TIME_TO_LIVE_MS)
+                .setMasterInfoPort(DEFAULT_MASTER_INFO_PORT)
+                .setRegionServerInfoPort(DEFAULT_REGIONSERVER_INFO_PORT)
+                .setRegionServerLeasePeriodMs(DEFAULT_REGIONSERVER_LEASE_PERIOD_MS)
+                .setRpcTimeoutMs(DEFAULT_RPC_TIMEOUT_MS)
+                .setAll(overrideProps)
         );
     }    
-   
-    public QueryServicesTestImpl(QueryServicesOptions options) {
-        super(options);
-        getConfig().setIfUnset(QueryServices.MAX_HASH_CACHE_TIME_TO_LIVE_MS, Integer.toString(DEFAULT_MAX_HASH_CACHE_TIME_TO_LIVE_MS));
-        getConfig().setInt("hbase.master.info.port", -1); // To allow tests to run while local hbase is running too
-        getConfig().setInt("hbase.regionserver.info.port", -1);
-        getConfig().set("hbase.regionserver.lease.period" , "9000000"); // Increase so that we don't get timeouts while debugging
-        getConfig().set("hbase.rpc.timeout" , "9000000");
-
-    }    
 }