/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapred;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.tools.CLI;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * <code>JobClient</code> is the primary interface for the user-job to interact
 * with the cluster.
 * 
 * <code>JobClient</code> provides facilities to submit jobs, track their 
 * progress, access component-tasks' reports and logs, obtain the Map-Reduce 
 * cluster status information, and so on.
 * 
 * <p>The job submission process involves:
 * <ol>
 *   <li>
 *   Checking the input and output specifications of the job.
 *   </li>
 *   <li>
 *   Computing the {@link InputSplit}s for the job.
 *   </li>
 *   <li>
 *   Setting up the requisite accounting information for the 
 *   {@link DistributedCache} of the job, if necessary.
 *   </li>
 *   <li>
 *   Copying the job's jar and configuration to the map-reduce system directory 
 *   on the distributed file-system. 
 *   </li>
 *   <li>
 *   Submitting the job to the cluster and optionally monitoring
 *   its status.
 *   </li>
 * </ol>
 *  
 * Normally the user creates the application, describes various facets of the
 * job via {@link JobConf} and then uses the <code>JobClient</code> to submit 
 * the job and monitor its progress.
 * 
 * <p>Here is an example on how to use <code>JobClient</code>:</p>
 * <p><blockquote><pre>
 *     // Create a new JobConf
 *     JobConf job = new JobConf(new Configuration(), MyJob.class);
 *     
 *     // Specify various job-specific parameters     
 *     job.setJobName("myjob");
 *     
 *     FileInputFormat.setInputPaths(job, new Path("in"));
 *     FileOutputFormat.setOutputPath(job, new Path("out"));
 *     
 *     job.setMapperClass(MyJob.MyMapper.class);
 *     job.setReducerClass(MyJob.MyReducer.class);
 *
 *     // Submit the job, then poll for progress until the job is complete
 *     JobClient.runJob(job);
 * </pre></blockquote>
 * 
 * <b id="JobControl">Job Control</b>
 * 
 * <p>At times clients chain map-reduce jobs to accomplish complex tasks 
 * which cannot be done via a single map-reduce job. This is fairly easy since 
 * the output of a job typically goes to the distributed file-system, where it 
 * can be used as the input for the next job.</p>
 * 
 * <p>However, this also means that the onus of ensuring jobs are complete 
 * (success/failure) lies squarely on the clients. In such situations the 
 * various job-control options are:
 * <ol>
 *   <li>
 *   {@link #runJob(JobConf)} : submits the job and returns only after 
 *   the job has completed.
 *   </li>
 *   <li>
 *   {@link #submitJob(JobConf)} : only submits the job; the client then 
 *   polls the returned handle to the {@link RunningJob} to query status and 
 *   make scheduling decisions (see the sketch below).
 *   </li>
 *   <li>
 *   {@link JobConf#setJobEndNotificationURI(String)} : sets up a notification
 *   upon job-completion, thus avoiding polling.
 *   </li>
 * </ol>
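 * 
 * <p>For example, the submit-then-poll pattern of option (2) above might 
 * look like the following sketch (the 5-second polling interval is 
 * illustrative):
 * <p><blockquote><pre>
 *     JobClient client = new JobClient(job);
 *     RunningJob running = client.submitJob(job);
 *     while (!running.isComplete()) {
 *       Thread.sleep(5000);
 *     }
 *     if (!running.isSuccessful()) {
 *       throw new IOException("Job failed!");
 *     }
 * </pre></blockquote>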
 * 
 * @see JobConf
 * @see ClusterStatus
 * @see Tool
 * @see DistributedCache
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class JobClient extends CLI {

  @InterfaceAudience.Private
  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
      "mapreduce.jobclient.retry.policy.enabled";
  @InterfaceAudience.Private
  public static final boolean MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT =
      false;
  @InterfaceAudience.Private
  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY =
      "mapreduce.jobclient.retry.policy.spec";
  @InterfaceAudience.Private
  public static final String MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
153      "10000,6,60000,10"; // t1,n1,t2,n2,...

  public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
  private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED;

  private int maxRetry = MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES;
  private long retryInterval =
      MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL;

  static {
    ConfigUtil.loadResources();
  }

  /**
   * A NetworkedJob is an implementation of RunningJob.  It wraps a
   * {@link Job} to provide job information, and interacts with the
   * remote service to provide certain functionality.
   */
  static class NetworkedJob implements RunningJob {
    Job job;
    /**
     * We store the underlying {@link Job}.  If the job is null, then we
     * cannot perform any of the tasks.  The job might be null if the
     * cluster has completely forgotten about the job.  (eg, 24 hours
     * after the job completes.)
     */
    public NetworkedJob(JobStatus status, Cluster cluster) throws IOException {
      this(status, cluster, new JobConf(status.getJobFile()));
    }

    private NetworkedJob(JobStatus status, Cluster cluster, JobConf conf)
        throws IOException {
      this(Job.getInstance(cluster, status, conf));
    }

    public NetworkedJob(Job job) throws IOException {
      this.job = job;
    }

    public Configuration getConfiguration() {
      return job.getConfiguration();
    }

    /**
     * An identifier for the job
     */
    public JobID getID() {
      return JobID.downgrade(job.getJobID());
    }

    /** @deprecated This method is deprecated and will be removed. Applications should 
     * rather use {@link #getID()}.*/
    @Deprecated
    public String getJobID() {
      return getID().toString();
    }

    /**
     * The user-specified job name
     */
    public String getJobName() {
      return job.getJobName();
    }

    /**
     * The name of the job file
     */
    public String getJobFile() {
      return job.getJobFile();
    }

    /**
     * A URL where the job's status can be seen
     */
    public String getTrackingURL() {
      return job.getTrackingURL();
    }

    /**
     * A float between 0.0 and 1.0, indicating the % of map work
     * completed.
     */
    public float mapProgress() throws IOException {
      return job.mapProgress();
    }

    /**
     * A float between 0.0 and 1.0, indicating the % of reduce work
     * completed.
     */
    public float reduceProgress() throws IOException {
      return job.reduceProgress();
    }

    /**
     * A float between 0.0 and 1.0, indicating the % of cleanup work
     * completed.
     */
    public float cleanupProgress() throws IOException {
      try {
        return job.cleanupProgress();
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    /**
     * A float between 0.0 and 1.0, indicating the % of setup work
     * completed.
     */
    public float setupProgress() throws IOException {
      return job.setupProgress();
    }

    /**
     * Returns immediately whether the whole job is done yet or not.
     */
    public synchronized boolean isComplete() throws IOException {
      return job.isComplete();
    }

    /**
     * True iff job completed successfully.
     */
    public synchronized boolean isSuccessful() throws IOException {
      return job.isSuccessful();
    }

    /**
     * Blocks until the job is finished
     */
    public void waitForCompletion() throws IOException {
      try {
        job.waitForCompletion(false);
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      } catch (ClassNotFoundException ce) {
        throw new IOException(ce);
      }
    }

    /**
     * Queries the service for the state of the current job.
     */
    public synchronized int getJobState() throws IOException {
      try {
        return job.getJobState().getValue();
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    /**
     * Tells the service to terminate the current job.
     */
    public synchronized void killJob() throws IOException {
      job.killJob();
    }

    /** Set the priority of the job.
     * @param priority new priority of the job.
     */
    public synchronized void setJobPriority(String priority) 
                                                throws IOException {
      try {
        job.setPriority(
          org.apache.hadoop.mapreduce.JobPriority.valueOf(priority));
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    /**
     * Kill indicated task attempt.
     * @param taskId the id of the task to kill.
     * @param shouldFail if true the task is failed and added to the list of
     * failed tasks; otherwise it is just killed, without affecting the job's
     * failure status.
     */
    public synchronized void killTask(TaskAttemptID taskId,
        boolean shouldFail) throws IOException {
      if (shouldFail) {
        job.failTask(taskId);
      } else {
        job.killTask(taskId);
      }
    }

    /** @deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}*/
    @Deprecated
    public synchronized void killTask(String taskId, boolean shouldFail) throws IOException {
      killTask(TaskAttemptID.forName(taskId), shouldFail);
    }

    /**
     * Fetch task completion events from the cluster for this job.
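     *
     * <p>At most 10 events are returned per call, so callers typically
     * advance <code>startFrom</code> by the length of the returned array to
     * page through all events; a sketch, where <code>runningJob</code> is a
     * {@link RunningJob} handle:
     * <pre>
     *   int from = 0;
     *   TaskCompletionEvent[] events;
     *   do {
     *     events = runningJob.getTaskCompletionEvents(from);
     *     from += events.length;
     *   } while (events.length &gt; 0);
     * </pre>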
     */
    public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
        int startFrom) throws IOException {
      try {
        org.apache.hadoop.mapreduce.TaskCompletionEvent[] events =
          job.getTaskCompletionEvents(startFrom, 10);
        TaskCompletionEvent[] ret = new TaskCompletionEvent[events.length];
        for (int i = 0; i < events.length; i++) {
          ret[i] = TaskCompletionEvent.downgrade(events[i]);
        }
        return ret;
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    /**
     * Returns a string representation of the job.
     */
    @Override
    public String toString() {
      return job.toString();
    }

    /**
     * Returns the counters for this job
     */
    public Counters getCounters() throws IOException {
      Counters result = null;
      org.apache.hadoop.mapreduce.Counters temp = job.getCounters();
      if (temp != null) {
        result = Counters.downgrade(temp);
      }
      return result;
    }

    @Override
    public String[] getTaskDiagnostics(TaskAttemptID id) throws IOException {
      try {
        return job.getTaskDiagnostics(id);
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    public String getHistoryUrl() throws IOException {
      try {
        return job.getHistoryUrl();
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    public boolean isRetired() throws IOException {
      try {
        return job.isRetired();
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    boolean monitorAndPrintJob() throws IOException, InterruptedException {
      return job.monitorAndPrintJob();
    }

    @Override
    public String getFailureInfo() throws IOException {
      try {
        return job.getStatus().getFailureInfo();
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }

    @Override
    public JobStatus getJobStatus() throws IOException {
      try {
        return JobStatus.downgrade(job.getStatus());
      } catch (InterruptedException ie) {
        throw new IOException(ie);
      }
    }
  }

  /**
   * UGI of the client. We store this UGI when the client is created and 
   * then make sure that the same UGI is used to run the various protocols.
   */
  UserGroupInformation clientUgi;

  /**
   * Create a job client.
   */
  public JobClient() {
  }

  /**
   * Build a job client with the given {@link JobConf}, and connect to the 
   * default cluster.
   * 
   * @param conf the job configuration.
   * @throws IOException
   */
  public JobClient(JobConf conf) throws IOException {
    init(conf);
  }

  /**
   * Build a job client with the given {@link Configuration}, 
   * and connect to the default cluster.
   * 
   * @param conf the configuration.
   * @throws IOException
   */
  public JobClient(Configuration conf) throws IOException {
    init(new JobConf(conf));
  }

  /**
   * Connect to the default cluster.
   * @param conf the job configuration.
   * @throws IOException
   */
  public void init(JobConf conf) throws IOException {
    setConf(conf);
    cluster = new Cluster(conf);
    clientUgi = UserGroupInformation.getCurrentUser();

    maxRetry = conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
      MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);

    retryInterval =
      conf.getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
        MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);
  }

  /**
   * Build a job client, connect to the indicated job tracker.
   * 
   * @param jobTrackAddr the job tracker to connect to.
   * @param conf configuration.
   */
  public JobClient(InetSocketAddress jobTrackAddr, 
                   Configuration conf) throws IOException {
    cluster = new Cluster(jobTrackAddr, conf);
    clientUgi = UserGroupInformation.getCurrentUser();
  }

  /**
   * Close the <code>JobClient</code>.
   */
  public synchronized void close() throws IOException {
    cluster.close();
  }

  /**
   * Get a filesystem handle.  We need this to prepare jobs
   * for submission to the MapReduce system.
   * 
   * @return the filesystem handle.
   */
  public synchronized FileSystem getFs() throws IOException {
    try {
      return cluster.getFileSystem();
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Get a handle to the Cluster.
   */
  public Cluster getClusterHandle() {
    return cluster;
  }

  /**
   * Submit a job to the MR system.
   * 
   * This returns a handle to the {@link RunningJob} which can be used to track
   * the running-job.
   * 
   * @param jobFile the job configuration.
   * @return a handle to the {@link RunningJob} which can be used to track the
   *         running-job.
   * @throws FileNotFoundException
   * @throws InvalidJobConfException
   * @throws IOException
   */
  public RunningJob submitJob(String jobFile) throws FileNotFoundException, 
                                                     InvalidJobConfException, 
                                                     IOException {
    // Load in the submitted job details
    JobConf job = new JobConf(jobFile);
    return submitJob(job);
  }

  /**
   * Submit a job to the MR system.
   * This returns a handle to the {@link RunningJob} which can be used to track
   * the running-job.
   * 
   * @param conf the job configuration.
   * @return a handle to the {@link RunningJob} which can be used to track the
   *         running-job.
   * @throws FileNotFoundException
   * @throws IOException
   */
  public RunningJob submitJob(final JobConf conf) throws FileNotFoundException,
                                                  IOException {
    return submitJobInternal(conf);
  }

  @InterfaceAudience.Private
  public RunningJob submitJobInternal(final JobConf conf)
      throws FileNotFoundException, IOException {
    try {
      conf.setBooleanIfUnset("mapred.mapper.new-api", false);
      conf.setBooleanIfUnset("mapred.reducer.new-api", false);
      Job job = clientUgi.doAs(new PrivilegedExceptionAction<Job>() {
        @Override
        public Job run() throws IOException, ClassNotFoundException, 
          InterruptedException {
          Job job = Job.getInstance(conf);
          job.submit();
          return job;
        }
      });
      // update our Cluster instance with the one created by Job for submission
      // (we can't pass our Cluster instance to Job, since Job wraps the config
      // instance, and the two configs would then diverge)
      cluster = job.getCluster();
      return new NetworkedJob(job);
    } catch (InterruptedException ie) {
      throw new IOException("interrupted", ie);
    }
  }

  private Job getJobUsingCluster(final JobID jobid) throws IOException,
      InterruptedException {
    return clientUgi.doAs(new PrivilegedExceptionAction<Job>() {
      public Job run() throws IOException, InterruptedException {
        return cluster.getJob(jobid);
      }
    });
  }

  protected RunningJob getJobInner(final JobID jobid) throws IOException {
    try {
      Job job = getJobUsingCluster(jobid);
      if (job != null) {
        JobStatus status = JobStatus.downgrade(job.getStatus());
        if (status != null) {
          return new NetworkedJob(status, cluster,
              new JobConf(job.getConfiguration()));
        }
      }
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
    return null;
  }

  /**
   * Get a {@link RunningJob} object to track an ongoing job.  Returns
   * null if the id does not correspond to any known job.
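   *
   * <p>If the job is not (yet) known to the cluster, the lookup is retried
   * up to {@link MRJobConfig#MR_CLIENT_JOB_MAX_RETRIES} times, sleeping
   * {@link MRJobConfig#MR_CLIENT_JOB_RETRY_INTERVAL} milliseconds between
   * attempts, before <code>null</code> is returned.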
   *
   * @param jobid the jobid of the job.
   * @return the {@link RunningJob} handle to track the job, null if the
   *         <code>jobid</code> doesn't correspond to any known job.
   * @throws IOException
   */
  public RunningJob getJob(final JobID jobid) throws IOException {
    for (int i = 0; i <= maxRetry; i++) {
      if (i > 0) {
        try {
          Thread.sleep(retryInterval);
        } catch (InterruptedException e) {
          // restore the interrupt status rather than swallowing it
          Thread.currentThread().interrupt();
          break;
        }
      }
      RunningJob job = getJobInner(jobid);
      if (job != null) {
        return job;
      }
    }
    return null;
  }

  /**@deprecated Applications should rather use {@link #getJob(JobID)}.
   */
  @Deprecated
  public RunningJob getJob(String jobid) throws IOException {
    return getJob(JobID.forName(jobid));
  }

  private static final TaskReport[] EMPTY_TASK_REPORTS = new TaskReport[0];

  /**
   * Get the information of the current state of the map tasks of a job.
   * 
   * @param jobId the job to query.
   * @return the list of all of the map tips.
   * @throws IOException
   */
  public TaskReport[] getMapTaskReports(JobID jobId) throws IOException {
    return getTaskReports(jobId, TaskType.MAP);
  }

  private TaskReport[] getTaskReports(final JobID jobId, TaskType type) throws 
    IOException {
    try {
      Job j = getJobUsingCluster(jobId);
      if (j == null) {
        return EMPTY_TASK_REPORTS;
      }
      return TaskReport.downgradeArray(j.getTaskReports(type));
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}*/
  @Deprecated
  public TaskReport[] getMapTaskReports(String jobId) throws IOException {
    return getMapTaskReports(JobID.forName(jobId));
  }

  /**
   * Get the information of the current state of the reduce tasks of a job.
   * 
   * @param jobId the job to query.
   * @return the list of all of the reduce tips.
   * @throws IOException
   */
  public TaskReport[] getReduceTaskReports(JobID jobId) throws IOException {
    return getTaskReports(jobId, TaskType.REDUCE);
  }

  /**
   * Get the information of the current state of the cleanup tasks of a job.
   * 
   * @param jobId the job to query.
   * @return the list of all of the cleanup tips.
   * @throws IOException
   */
  public TaskReport[] getCleanupTaskReports(JobID jobId) throws IOException {
    return getTaskReports(jobId, TaskType.JOB_CLEANUP);
  }

  /**
   * Get the information of the current state of the setup tasks of a job.
   * 
   * @param jobId the job to query.
   * @return the list of all of the setup tips.
   * @throws IOException
   */
  public TaskReport[] getSetupTaskReports(JobID jobId) throws IOException {
    return getTaskReports(jobId, TaskType.JOB_SETUP);
  }

  /**@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}*/
  @Deprecated
  public TaskReport[] getReduceTaskReports(String jobId) throws IOException {
    return getReduceTaskReports(JobID.forName(jobId));
  }

  /**
   * Display the information about a job's tasks, of a particular type and
   * in a particular state.
   * 
   * @param jobId the ID of the job
   * @param type the type of the task (map/reduce/setup/cleanup)
   * @param state the state of the task 
   * (pending/running/completed/failed/killed)
   * @throws IOException when there is an error communicating with the master
   * @throws IllegalArgumentException if an invalid type/state is passed
   */
  public void displayTasks(final JobID jobId, String type, String state) 
      throws IOException {
    try {
      Job job = getJobUsingCluster(jobId);
      super.displayTasks(job, type, state);
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Get status information about the Map-Reduce cluster.
   *  
   * @return the status information about the Map-Reduce cluster as an object
   *         of {@link ClusterStatus}.
   * @throws IOException
   */
  public ClusterStatus getClusterStatus() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
        public ClusterStatus run() throws IOException, InterruptedException {
          ClusterMetrics metrics = cluster.getClusterStatus();
          return new ClusterStatus(metrics.getTaskTrackerCount(), metrics
            .getBlackListedTaskTrackerCount(), cluster
            .getTaskTrackerExpiryInterval(), metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(), cluster.getJobTrackerStatus(),
            metrics.getDecommissionedTaskTrackerCount(), metrics
              .getGrayListedTaskTrackerCount());
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  private Collection<String> arrayToStringList(TaskTrackerInfo[] objs) {
    Collection<String> list = new ArrayList<String>();
    for (TaskTrackerInfo info : objs) {
      list.add(info.getTaskTrackerName());
    }
    return list;
  }

  private Collection<BlackListInfo> arrayToBlackListInfo(TaskTrackerInfo[] objs) {
    Collection<BlackListInfo> list = new ArrayList<BlackListInfo>();
    for (TaskTrackerInfo info : objs) {
      BlackListInfo binfo = new BlackListInfo();
      binfo.setTrackerName(info.getTaskTrackerName());
      binfo.setReasonForBlackListing(info.getReasonForBlacklist());
      binfo.setBlackListReport(info.getBlacklistReport());
      list.add(binfo);
    }
    return list;
  }

  /**
   * Get status information about the Map-Reduce cluster.
   *  
   * @param  detailed if true then get a detailed status including the
   *         tracker names
   * @return the status information about the Map-Reduce cluster as an object
   *         of {@link ClusterStatus}.
   * @throws IOException
   */
  public ClusterStatus getClusterStatus(boolean detailed) throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<ClusterStatus>() {
        public ClusterStatus run() throws IOException, InterruptedException {
          ClusterMetrics metrics = cluster.getClusterStatus();
          return new ClusterStatus(
            arrayToStringList(cluster.getActiveTaskTrackers()),
            arrayToBlackListInfo(cluster.getBlackListedTaskTrackers()),
            cluster.getTaskTrackerExpiryInterval(),
            metrics.getOccupiedMapSlots(),
            metrics.getOccupiedReduceSlots(), metrics.getMapSlotCapacity(),
            metrics.getReduceSlotCapacity(),
            cluster.getJobTrackerStatus());
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /** 
   * Get the jobs that are not completed and not failed.
   * 
   * @return array of {@link JobStatus} for the running/to-be-run jobs.
   * @throws IOException
   */
  public JobStatus[] jobsToComplete() throws IOException {
    List<JobStatus> stats = new ArrayList<JobStatus>();
    for (JobStatus stat : getAllJobs()) {
      if (!stat.isJobComplete()) {
        stats.add(stat);
      }
    }
    return stats.toArray(new JobStatus[0]);
  }

  /** 
   * Get the jobs that are submitted.
   * 
   * @return array of {@link JobStatus} for the submitted jobs.
   * @throws IOException
   */
  public JobStatus[] getAllJobs() throws IOException {
    try {
      org.apache.hadoop.mapreduce.JobStatus[] jobs = 
          clientUgi.doAs(new PrivilegedExceptionAction<
              org.apache.hadoop.mapreduce.JobStatus[]>() {
            public org.apache.hadoop.mapreduce.JobStatus[] run() 
                throws IOException, InterruptedException {
              return cluster.getAllJobStatuses();
            }
          });
      JobStatus[] stats = new JobStatus[jobs.length];
      for (int i = 0; i < jobs.length; i++) {
        stats[i] = JobStatus.downgrade(jobs[i]);
      }
      return stats;
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /** 
   * Utility that submits a job, then polls for progress until the job is
   * complete.
   * 
   * @param job the job configuration.
   * @throws IOException if the job fails
   */
  public static RunningJob runJob(JobConf job) throws IOException {
    JobClient jc = new JobClient(job);
    RunningJob rj = jc.submitJob(job);
    try {
      if (!jc.monitorAndPrintJob(job, rj)) {
        throw new IOException("Job failed!");
      }
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
    return rj;
  }

  /**
   * Monitor a job and print status in real-time as progress is made and tasks 
   * fail.
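   * Note that the {@link RunningJob} handle must have been obtained from
   * this <code>JobClient</code>; internally it is downcast to
   * <code>NetworkedJob</code>.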
   * @param conf the job's configuration
   * @param job the job to track
   * @return true if the job succeeded
   * @throws IOException if communication to the JobTracker fails
   */
  public boolean monitorAndPrintJob(JobConf conf, 
                                    RunningJob job
  ) throws IOException, InterruptedException {
    return ((NetworkedJob)job).monitorAndPrintJob();
  }

  static String getTaskLogURL(TaskAttemptID taskId, String baseUrl) {
    return (baseUrl + "/tasklog?plaintext=true&attemptid=" + taskId);
  }

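  /**
   * Build a {@link Configuration} for a job tracker specification, which is
   * either a <code>host:port</code> address or the name of a cluster whose
   * <code>hadoop-&lt;name&gt;.xml</code> configuration file can be found on
   * the CLASSPATH.
   */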
  static Configuration getConfiguration(String jobTrackerSpec) {
    Configuration conf = new Configuration();
    if (jobTrackerSpec != null) {
      if (jobTrackerSpec.indexOf(":") >= 0) {
        conf.set("mapred.job.tracker", jobTrackerSpec);
      } else {
        String classpathFile = "hadoop-" + jobTrackerSpec + ".xml";
        URL validate = conf.getResource(classpathFile);
        if (validate == null) {
          throw new RuntimeException(classpathFile + " not found on CLASSPATH");
        }
        conf.addResource(classpathFile);
      }
    }
    return conf;
  }

  /**
   * Sets the output filter for tasks. Only those tasks are printed whose
   * output matches the filter.
   * @param newValue task filter.
   */
  @Deprecated
  public void setTaskOutputFilter(TaskStatusFilter newValue) {
    this.taskOutputFilter = newValue;
  }

  /**
   * Get the task output filter out of the JobConf.
   * 
   * @param job the JobConf to examine.
   * @return the filter level.
   */
  public static TaskStatusFilter getTaskOutputFilter(JobConf job) {
    return TaskStatusFilter.valueOf(job.get("jobclient.output.filter", 
                                            "FAILED"));
  }

  /**
   * Modify the JobConf to set the task output filter.
   * 
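   * <p>For example, the following makes the client print all tasks,
   * regardless of status (a sketch):
   * <pre>
   *   JobConf job = new JobConf();
   *   JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
   *   // equivalent to job.set("jobclient.output.filter", "ALL")
   * </pre>
   * 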
   * @param job the JobConf to modify.
   * @param newValue the value to set.
   */
  public static void setTaskOutputFilter(JobConf job, 
                                         TaskStatusFilter newValue) {
    job.set("jobclient.output.filter", newValue.toString());
  }

  /**
   * Returns task output filter.
   * @return task filter. 
   */
  @Deprecated
  public TaskStatusFilter getTaskOutputFilter() {
    return this.taskOutputFilter;
  }

  protected long getCounter(org.apache.hadoop.mapreduce.Counters cntrs,
      String counterGroupName, String counterName) throws IOException {
    Counters counters = Counters.downgrade(cntrs);
    return counters.findCounter(counterGroupName, counterName).getValue();
  }

  /**
   * Get status information about the max available Maps in the cluster.
   *  
   * @return the max available Maps in the cluster
   * @throws IOException
   */
  public int getDefaultMaps() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
        @Override
        public Integer run() throws IOException, InterruptedException {
          return cluster.getClusterStatus().getMapSlotCapacity();
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Get status information about the max available Reduces in the cluster.
   *  
   * @return the max available Reduces in the cluster
   * @throws IOException
   */
  public int getDefaultReduces() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<Integer>() {
        @Override
        public Integer run() throws IOException, InterruptedException {
          return cluster.getClusterStatus().getReduceSlotCapacity();
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Grab the jobtracker system directory path where job-specific files are to be placed.
   * 
   * @return the system directory where job-specific files are to be placed.
   */
  public Path getSystemDir() {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<Path>() {
        @Override
        public Path run() throws IOException, InterruptedException {
          return cluster.getSystemDir();
        }
      });
    } catch (IOException ioe) {
      return null;
    } catch (InterruptedException ie) {
      return null;
    }
  }

  /**
   * Checks if the job directory is clean and has all the required components
   * for (re)starting the job.
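   *
   * <p>Concretely, the directory must contain both a <code>job.xml</code>
   * and a <code>job.split</code> entry. A sketch of a typical call, where
   * <code>jobClient</code> and <code>sysJobDir</code> are illustrative:
   * <pre>
   *   FileSystem fs = jobClient.getFs();
   *   boolean valid = JobClient.isJobDirValid(sysJobDir, fs);
   * </pre>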
   */
  public static boolean isJobDirValid(Path jobDirPath, FileSystem fs)
      throws IOException {
    FileStatus[] contents = fs.listStatus(jobDirPath);
    int matchCount = 0;
    if (contents != null && contents.length >= 2) {
      for (FileStatus status : contents) {
        if ("job.xml".equals(status.getPath().getName())) {
          ++matchCount;
        }
        if ("job.split".equals(status.getPath().getName())) {
          ++matchCount;
        }
      }
      if (matchCount == 2) {
        return true;
      }
    }
    return false;
  }

  /**
   * Fetch the staging area directory for the application.
   * 
   * @return path to staging area directory
   * @throws IOException
   */
  public Path getStagingAreaDir() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<Path>() {
        @Override
        public Path run() throws IOException, InterruptedException {
          return cluster.getStagingAreaDir();
        }
      });
    } catch (InterruptedException ie) {
      // throw RuntimeException instead for compatibility reasons
      throw new RuntimeException(ie);
    }
  }

  private JobQueueInfo getJobQueueInfo(QueueInfo queue) {
    JobQueueInfo ret = new JobQueueInfo(queue);
    // make sure to convert any children
    if (queue.getQueueChildren().size() > 0) {
      List<JobQueueInfo> childQueues = new ArrayList<JobQueueInfo>(queue
          .getQueueChildren().size());
      for (QueueInfo child : queue.getQueueChildren()) {
        childQueues.add(getJobQueueInfo(child));
      }
      ret.setChildren(childQueues);
    }
    return ret;
  }

  private JobQueueInfo[] getJobQueueInfoArray(QueueInfo[] queues)
      throws IOException {
    JobQueueInfo[] ret = new JobQueueInfo[queues.length];
    for (int i = 0; i < queues.length; i++) {
      ret[i] = getJobQueueInfo(queues[i]);
    }
    return ret;
  }

  /**
   * Returns an array of queue information objects about the root-level
   * queues configured.
   *
   * @return the array of root-level JobQueueInfo objects
   * @throws IOException
   */
  public JobQueueInfo[] getRootQueues() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
        public JobQueueInfo[] run() throws IOException, InterruptedException {
          return getJobQueueInfoArray(cluster.getRootQueues());
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Returns an array of queue information objects about the immediate
   * children of queue <code>queueName</code>.
   * 
   * @param queueName the name of the parent queue.
   * @return the array of immediate-children JobQueueInfo objects
   * @throws IOException
   */
  public JobQueueInfo[] getChildQueues(final String queueName) throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
        public JobQueueInfo[] run() throws IOException, InterruptedException {
          return getJobQueueInfoArray(cluster.getChildQueues(queueName));
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Return an array of queue information objects about all the Job Queues
   * configured.
   * 
   * @return Array of JobQueueInfo objects
   * @throws IOException
   */
  public JobQueueInfo[] getQueues() throws IOException {
    try {
      return clientUgi.doAs(new PrivilegedExceptionAction<JobQueueInfo[]>() {
        public JobQueueInfo[] run() throws IOException, InterruptedException {
          return getJobQueueInfoArray(cluster.getQueues());
        }
      });
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Gets all the jobs which were added to a particular job queue.
   * 
   * @param queueName name of the job queue
   * @return array of jobs present in the job queue
   * @throws IOException
   */
  public JobStatus[] getJobsFromQueue(final String queueName) throws IOException {
    try {
      QueueInfo queue = clientUgi.doAs(new PrivilegedExceptionAction<QueueInfo>() {
        @Override
        public QueueInfo run() throws IOException, InterruptedException {
          return cluster.getQueue(queueName);
        }
      });
      if (queue == null) {
        return null;
      }
      org.apache.hadoop.mapreduce.JobStatus[] stats = 
        queue.getJobStatuses();
      JobStatus[] ret = new JobStatus[stats.length];
      for (int i = 0; i < stats.length; i++) {
        ret[i] = JobStatus.downgrade(stats[i]);
      }
      return ret;
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Gets the queue information associated with a particular job queue.
   * 
   * @param queueName name of the job queue.
   * @return queue information associated with the particular queue.
   * @throws IOException
   */
  public JobQueueInfo getQueueInfo(final String queueName) throws IOException {
    try {
      QueueInfo queueInfo = clientUgi.doAs(new 
          PrivilegedExceptionAction<QueueInfo>() {
        public QueueInfo run() throws IOException, InterruptedException {
          return cluster.getQueue(queueName);
        }
      });
      if (queueInfo != null) {
        return new JobQueueInfo(queueInfo);
      }
      return null;
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Gets the Queue ACLs for the current user.
   * @return array of QueueAclsInfo object for the current user.
   * @throws IOException
   */
  public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException {
    try {
      org.apache.hadoop.mapreduce.QueueAclsInfo[] acls = 
        clientUgi.doAs(new 
            PrivilegedExceptionAction
            <org.apache.hadoop.mapreduce.QueueAclsInfo[]>() {
              public org.apache.hadoop.mapreduce.QueueAclsInfo[] run() 
              throws IOException, InterruptedException {
                return cluster.getQueueAclsForCurrentUser();
              }
        });
      QueueAclsInfo[] ret = new QueueAclsInfo[acls.length];
      for (int i = 0; i < acls.length; i++) {
        ret[i] = QueueAclsInfo.downgrade(acls[i]);
      }
      return ret;
    } catch (InterruptedException ie) {
      throw new IOException(ie);
    }
  }

  /**
   * Get a delegation token for the user from the JobTracker.
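   *
   * <p>A sketch of a typical use, where <code>jobClient</code> and the
   * renewer principal <code>"yarn"</code> are illustrative:
   * <pre>
   *   Token&lt;DelegationTokenIdentifier&gt; token =
   *       jobClient.getDelegationToken(new Text("yarn"));
   * </pre>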
   * @param renewer the user who can renew the token
   * @return the new token
   * @throws IOException
   */
  public Token<DelegationTokenIdentifier> 
    getDelegationToken(final Text renewer) throws IOException, InterruptedException {
    return clientUgi.doAs(new 
        PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
      public Token<DelegationTokenIdentifier> run() throws IOException, 
      InterruptedException {
        return cluster.getDelegationToken(renewer);
      }
    });
  }

  /**
   * Renew a delegation token.
   * @param token the token to renew
   * @return the new expiration time
   * @throws InvalidToken
   * @throws IOException
   * @deprecated Use {@link Token#renew} instead
   */
  public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                   ) throws InvalidToken, IOException, 
                                            InterruptedException {
    return token.renew(getConf());
  }

  /**
   * Cancel a delegation token from the JobTracker.
   * @param token the token to cancel
   * @throws IOException
   * @deprecated Use {@link Token#cancel} instead
   */
  public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
                                    ) throws InvalidToken, IOException, 
                                             InterruptedException {
    token.cancel(getConf());
  }

  /**
   * Run the <code>JobClient</code> as a command-line tool via
   * {@link ToolRunner}.
   */
  public static void main(String argv[]) throws Exception {
    int res = ToolRunner.run(new JobClient(), argv);
    System.exit(res);
  }
}