org.apache.hadoop.fs.FileSystem



API Popularity

36 client projects

Project group: org.apache
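
Before the per-project diffs, a minimal sketch of how this class is typically obtained and used. The Configuration defaults and the path /tmp/example.txt are illustrative assumptions, not drawn from the projects below; the calls themselves (FileSystem.get, exists) are standard Hadoop FileSystem API.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // picks up core-site.xml if present on the classpath
    FileSystem fs = FileSystem.get(conf);       // default FileSystem, resolved from fs.defaultFS
    Path p = new Path("/tmp/example.txt");      // hypothetical path for illustration
    System.out.println(p + " exists: " + fs.exists(p));
  }
}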

Project apache/hbase in file ...apache.hadoop.hbase.master.HMasterCommandLine.java (2014-05-28)
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
 import org.apache.hadoop.hbase.MasterNotRunningException;
@@ -178,6 +179,7 @@ public class HMasterCommandLine extends ServerCommandLine {
         }
         conf.set(HConstants.ZOOKEEPER_CLIENT_PORT,
                  Integer.toString(clientPort));
+        conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 10 *1000);
         // Need to have the zk cluster shutdown when master is shutdown.
         // Run a subclass that does the zk cluster shutdown on its way out.
         LocalHBaseCluster cluster = new LocalHBaseCluster(conf, conf.getInt("hbase.masters", 1),
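
The hunk above only shows the new import and a ZooKeeper session-timeout setting; the FileSystem usage itself falls outside the excerpt. As a hedged sketch of the common pattern in such entry points, the snippet below resolves the FileSystem backing the configured HBase root directory. The key name and default value are assumptions for illustration, not taken from the actual commit.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical illustration, not the actual HMasterCommandLine change:
// resolve the FileSystem that backs the configured HBase root directory.
public class RootDirFsSketch {
  public static FileSystem rootDirFs(Configuration conf) throws IOException {
    // "hbase.rootdir" is the standard HBase key; the default here is an assumption.
    Path rootDir = new Path(conf.get("hbase.rootdir", "file:///tmp/hbase"));
    return rootDir.getFileSystem(conf);   // FS implementation chosen by the path's scheme
  }
}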
Project apache/hbase in file ...java.org.apache.hadoop.hbase.mapreduce.Export.java (2013-04-09)
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.client.Result;
@@ -78,7 +79,7 @@ public class Export {
     job.setOutputFormatClass(SequenceFileOutputFormat.class);
     job.setOutputKeyClass(ImmutableBytesWritable.class);
     job.setOutputValueClass(Result.class);
-    FileOutputFormat.setOutputPath(job, outputDir);
+    FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs.
     return job;
   }
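
The added comment notes that the job conf does not carry the default filesystem. A hedged sketch of one way to make an output path self-describing before handing it to a job: qualify it against the default FileSystem of a fully loaded Configuration. This is an illustration of the idea, not the actual Export.java fix.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only (assumed helper, not from the diff): qualify an output path so
// downstream code does not depend on the job conf carrying fs.defaultFS.
public class QualifyOutputDir {
  public static Path qualify(Configuration conf, Path outputDir) throws IOException {
    FileSystem fs = FileSystem.get(conf);   // default FileSystem from fs.defaultFS
    return fs.makeQualified(outputDir);     // adds scheme and authority if missing
  }
}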
 
Project apache/hive in file ...adoop.hive.ql.io.avro.AvroGenericRecordReader.java (2014-05-08)
@@ -31,7 +31,6 @@
 import org.apache.avro.mapred.FsInput;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.plan.MapWork;
@@ -94,7 +93,6 @@ public AvroGenericRecordReader(JobConf job, FileSplit split, Reporter reporter)
    * @throws AvroSerdeException
    */
   private Schema getSchema(JobConf job, FileSplit split) throws AvroSerdeException, IOException {
-    FileSystem fs = split.getPath().getFileSystem(job);
     // Inside of a MR job, we can pull out the actual properties
     if(AvroSerdeUtils.insideMRJob(job)) {
       MapWork mapWork = Utilities.getMapWork(job);
@@ -155,6 +153,7 @@ public boolean next(NullWritable nullWritable, AvroGenericRecordWritable record)
     GenericData.Record r = (GenericData.Record)reader.next();
     record.setRecord(r);
     record.setRecordReaderID(recordReaderID);
+    record.setFileSchema(reader.getSchema());
 
     return true;
   }
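
The Hive change drops a FileSystem lookup that getSchema no longer used. For reference, a hedged sketch of the pattern the removed line relied on: derive the FileSystem from the split's Path and the job Configuration, then open the file. The class and method names below are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the pattern used by the removed line: the FileSystem is chosen
// by the path's scheme, using the job Configuration for its settings.
public class SplitPathOpen {
  public static FSDataInputStream open(Path splitPath, Configuration job) throws IOException {
    FileSystem fs = splitPath.getFileSystem(job);
    return fs.open(splitPath);
  }
}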