001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.fs;
019
020import java.io.FileNotFoundException;
021import java.io.IOException;
022import java.lang.reflect.Constructor;
023import java.net.URI;
024import java.net.URISyntaxException;
025import java.util.ArrayList;
026import java.util.Collection;
027import java.util.EnumSet;
028import java.util.HashMap;
029import java.util.List;
030import java.util.Map;
031import java.util.NoSuchElementException;
032import java.util.StringTokenizer;
033import java.util.concurrent.ConcurrentHashMap;
034
035import org.apache.commons.logging.Log;
036import org.apache.commons.logging.LogFactory;
037import org.apache.hadoop.HadoopIllegalArgumentException;
038import org.apache.hadoop.classification.InterfaceAudience;
039import org.apache.hadoop.classification.InterfaceStability;
040import org.apache.hadoop.conf.Configuration;
041import org.apache.hadoop.fs.FileSystem.Statistics;
042import org.apache.hadoop.fs.Options.ChecksumOpt;
043import org.apache.hadoop.fs.Options.CreateOpts;
044import org.apache.hadoop.fs.Options.Rename;
045import org.apache.hadoop.fs.permission.AclEntry;
046import org.apache.hadoop.fs.permission.AclStatus;
047import org.apache.hadoop.fs.permission.FsAction;
048import org.apache.hadoop.fs.permission.FsPermission;
049import org.apache.hadoop.security.AccessControlException;
050import org.apache.hadoop.security.SecurityUtil;
051import org.apache.hadoop.security.token.Token;
052import org.apache.hadoop.util.Progressable;
053
054import com.google.common.annotations.VisibleForTesting;
055
056/**
057 * This class provides an interface for implementors of a Hadoop file system
058 * (analogous to the VFS of Unix). Applications do not access this class;
059 * instead they access files across all file systems using {@link FileContext}.
060 * 
 * Pathnames passed to AbstractFileSystem can be a fully qualified URI that
 * matches the "this" file system (i.e. same scheme and authority)
 * or a slash-relative name that is assumed to be relative
 * to the root of the "this" file system.
065 */
066@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Statistics recorded per file system base URI (scheme + authority). */
  private static final Map<URI, Statistics> 
      STATISTICS_TABLE = new HashMap<URI, Statistics>();
  
  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
    new ConcurrentHashMap<Class<?>, Constructor<?>>();
  
  /** Signature of the (URI, Configuration) constructor that every
   *  implementation must declare; used by {@link #newInstance}. */
  private static final Class<?>[] URI_CONFIG_ARGS = 
    new Class[]{URI.class, Configuration.class};
  
  /** The statistics for this file system. */
  protected Statistics statistics;

  /** Error text used when no AbstractFileSystem implementation is configured
   *  for a URI scheme; package-visible for tests. */
  @VisibleForTesting
  static final String NO_ABSTRACT_FS_ERROR = "No AbstractFileSystem configured for scheme";
  
  /** The URI (scheme and authority only) identifying this file system. */
  private final URI myUri;
  
  /** @return the {@link Statistics} object tracking operations on this
   *  file system instance. */
  public Statistics getStatistics() {
    return statistics;
  }
093  
094  /**
095   * Returns true if the specified string is considered valid in the path part
096   * of a URI by this file system.  The default implementation enforces the rules
097   * of HDFS, but subclasses may override this method to implement specific
098   * validation rules for specific file systems.
099   * 
100   * @param src String source filename to check, path part of the URI
101   * @return boolean true if the specified string is considered valid
102   */
103  public boolean isValidName(String src) {
104    // Prohibit ".." "." and anything containing ":"
105    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
106    while(tokens.hasMoreTokens()) {
107      String element = tokens.nextToken();
108      if (element.equals("..") ||
109          element.equals(".")  ||
110          (element.indexOf(":") >= 0)) {
111        return false;
112      }
113    }
114    return true;
115  }
116  
117  /** 
118   * Create an object for the given class and initialize it from conf.
119   * @param theClass class of which an object is created
120   * @param conf Configuration
121   * @return a new object
122   */
123  @SuppressWarnings("unchecked")
124  static <T> T newInstance(Class<T> theClass,
125    URI uri, Configuration conf) {
126    T result;
127    try {
128      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
129      if (meth == null) {
130        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
131        meth.setAccessible(true);
132        CONSTRUCTOR_CACHE.put(theClass, meth);
133      }
134      result = meth.newInstance(uri, conf);
135    } catch (Exception e) {
136      throw new RuntimeException(e);
137    }
138    return result;
139  }
140  
141  /**
142   * Create a file system instance for the specified uri using the conf. The
143   * conf is used to find the class name that implements the file system. The
144   * conf is also passed to the file system for its configuration.
145   *
146   * @param uri URI of the file system
147   * @param conf Configuration for the file system
148   * 
149   * @return Returns the file system for the given URI
150   *
151   * @throws UnsupportedFileSystemException file system for <code>uri</code> is
152   *           not found
153   */
154  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
155      throws UnsupportedFileSystemException {
156    final String fsImplConf = String.format("fs.AbstractFileSystem.%s.impl",
157        uri.getScheme());
158
159    Class<?> clazz = conf.getClass(fsImplConf, null);
160    if (clazz == null) {
161      throw new UnsupportedFileSystemException(String.format(
162          "%s=null: %s: %s",
163          fsImplConf, NO_ABSTRACT_FS_ERROR, uri.getScheme()));
164    }
165    return (AbstractFileSystem) newInstance(clazz, uri, conf);
166  }
167
168  /**
169   * Get the statistics for a particular file system.
170   * 
171   * @param uri
172   *          used as key to lookup STATISTICS_TABLE. Only scheme and authority
173   *          part of the uri are used.
174   * @return a statistics object
175   */
176  protected static synchronized Statistics getStatistics(URI uri) {
177    String scheme = uri.getScheme();
178    if (scheme == null) {
179      throw new IllegalArgumentException("Scheme not defined in the uri: "
180          + uri);
181    }
182    URI baseUri = getBaseUri(uri);
183    Statistics result = STATISTICS_TABLE.get(baseUri);
184    if (result == null) {
185      result = new Statistics(scheme);
186      STATISTICS_TABLE.put(baseUri, result);
187    }
188    return result;
189  }
190  
191  private static URI getBaseUri(URI uri) {
192    String scheme = uri.getScheme();
193    String authority = uri.getAuthority();
194    String baseUriString = scheme + "://";
195    if (authority != null) {
196      baseUriString = baseUriString + authority;
197    } else {
198      baseUriString = baseUriString + "/";
199    }
200    return URI.create(baseUriString);
201  }
202  
203  public static synchronized void clearStatistics() {
204    for(Statistics stat: STATISTICS_TABLE.values()) {
205      stat.reset();
206    }
207  }
208
209  /**
210   * Prints statistics for all file systems.
211   */
212  public static synchronized void printStatistics() {
213    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
214      System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
215          + pair.getKey().getAuthority() + ": " + pair.getValue());
216    }
217  }
218  
219  protected static synchronized Map<URI, Statistics> getAllStatistics() {
220    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
221        STATISTICS_TABLE.size());
222    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
223      URI key = pair.getKey();
224      Statistics value = pair.getValue();
225      Statistics newStatsObj = new Statistics(value);
226      statsMap.put(URI.create(key.toString()), newStatsObj);
227    }
228    return statsMap;
229  }
230
231  /**
232   * The main factory method for creating a file system. Get a file system for
233   * the URI's scheme and authority. The scheme of the <code>uri</code>
234   * determines a configuration property name,
235   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
236   * AbstractFileSystem class.
237   * 
238   * The entire URI and conf is passed to the AbstractFileSystem factory method.
239   * 
240   * @param uri for the file system to be created.
241   * @param conf which is passed to the file system impl.
242   * 
243   * @return file system for the given URI.
244   * 
245   * @throws UnsupportedFileSystemException if the file system for
246   *           <code>uri</code> is not supported.
247   */
248  public static AbstractFileSystem get(final URI uri, final Configuration conf)
249      throws UnsupportedFileSystemException {
250    return createFileSystem(uri, conf);
251  }
252
253  /**
254   * Constructor to be called by subclasses.
255   * 
256   * @param uri for this file system.
257   * @param supportedScheme the scheme supported by the implementor
258   * @param authorityNeeded if true then theURI must have authority, if false
259   *          then the URI must have null authority.
260   *
261   * @throws URISyntaxException <code>uri</code> has syntax error
262   */
263  public AbstractFileSystem(final URI uri, final String supportedScheme,
264      final boolean authorityNeeded, final int defaultPort)
265      throws URISyntaxException {
266    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
267    statistics = getStatistics(uri); 
268  }
269  
270  /**
271   * Check that the Uri's scheme matches
272   * @param uri
273   * @param supportedScheme
274   */
275  public void checkScheme(URI uri, String supportedScheme) {
276    String scheme = uri.getScheme();
277    if (scheme == null) {
278      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
279    }
280    if (!scheme.equals(supportedScheme)) {
281      throw new HadoopIllegalArgumentException("Uri scheme " + uri
282          + " does not match the scheme " + supportedScheme);
283    }
284  }
285
286  /**
287   * Get the URI for the file system based on the given URI. The path, query
288   * part of the given URI is stripped out and default file system port is used
289   * to form the URI.
290   * 
291   * @param uri FileSystem URI.
292   * @param authorityNeeded if true authority cannot be null in the URI. If
293   *          false authority must be null.
294   * @param defaultPort default port to use if port is not specified in the URI.
295   * 
296   * @return URI of the file system
297   * 
298   * @throws URISyntaxException <code>uri</code> has syntax error
299   */
300  private URI getUri(URI uri, String supportedScheme,
301      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
302    checkScheme(uri, supportedScheme);
303    // A file system implementation that requires authority must always
304    // specify default port
305    if (defaultPort < 0 && authorityNeeded) {
306      throw new HadoopIllegalArgumentException(
307          "FileSystem implementation error -  default port " + defaultPort
308              + " is not valid");
309    }
310    String authority = uri.getAuthority();
311    if (authority == null) {
312       if (authorityNeeded) {
313         throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
314       } else {
315         return new URI(supportedScheme + ":///");
316       }   
317    }
318    // authority is non null  - AuthorityNeeded may be true or false.
319    int port = uri.getPort();
320    port = (port == -1 ? defaultPort : port);
321    if (port == -1) { // no port supplied and default port is not specified
322      return new URI(supportedScheme, authority, "/", null);
323    }
324    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
325  }
326  
327  /**
328   * The default port of this file system.
329   * 
330   * @return default port of this file system's Uri scheme
331   *         A uri with a port of -1 => default port;
332   */
333  public abstract int getUriDefaultPort();
334
335  /**
336   * Returns a URI whose scheme and authority identify this FileSystem.
337   * 
338   * @return the uri of this file system.
339   */
340  public URI getUri() {
341    return myUri;
342  }
343  
344  /**
345   * Check that a Path belongs to this FileSystem.
346   * 
347   * If the path is fully qualified URI, then its scheme and authority
348   * matches that of this file system. Otherwise the path must be 
349   * slash-relative name.
350   * 
351   * @throws InvalidPathException if the path is invalid
352   */
353  public void checkPath(Path path) {
354    URI uri = path.toUri();
355    String thatScheme = uri.getScheme();
356    String thatAuthority = uri.getAuthority();
357    if (thatScheme == null) {
358      if (thatAuthority == null) {
359        if (path.isUriPathAbsolute()) {
360          return;
361        }
362        throw new InvalidPathException("relative paths not allowed:" + 
363            path);
364      } else {
365        throw new InvalidPathException(
366            "Path without scheme with non-null authority:" + path);
367      }
368    }
369    String thisScheme = this.getUri().getScheme();
370    String thisHost = this.getUri().getHost();
371    String thatHost = uri.getHost();
372    
373    // Schemes and hosts must match.
374    // Allow for null Authority for file:///
375    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
376       (thisHost != null && 
377            !thisHost.equalsIgnoreCase(thatHost)) ||
378       (thisHost == null && thatHost != null)) {
379      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
380          + this.getUri());
381    }
382    
383    // Ports must match, unless this FS instance is using the default port, in
384    // which case the port may be omitted from the given URI
385    int thisPort = this.getUri().getPort();
386    int thatPort = uri.getPort();
387    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
388      thatPort = this.getUriDefaultPort();
389    }
390    if (thisPort != thatPort) {
391      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
392          + this.getUri());
393    }
394  }
395  
396  /**
397   * Get the path-part of a pathname. Checks that URI matches this file system
398   * and that the path-part is a valid name.
399   * 
400   * @param p path
401   * 
402   * @return path-part of the Path p
403   */
404  public String getUriPath(final Path p) {
405    checkPath(p);
406    String s = p.toUri().getPath();
407    if (!isValidName(s)) {
408      throw new InvalidPathException("Path part " + s + " from URI " + p
409          + " is not a valid filename.");
410    }
411    return s;
412  }
413  
414  /**
415   * Make the path fully qualified to this file system
416   * @param path
417   * @return the qualified path
418   */
419  public Path makeQualified(Path path) {
420    checkPath(path);
421    return path.makeQualified(this.getUri(), null);
422  }
423  
424  /**
425   * Some file systems like LocalFileSystem have an initial workingDir
426   * that is used as the starting workingDir. For other file systems
427   * like HDFS there is no built in notion of an initial workingDir.
428   * 
429   * @return the initial workingDir if the file system has such a notion
430   *         otherwise return a null.
431   */
432  public Path getInitialWorkingDirectory() {
433    return null;
434  }
435  
436  /** 
437   * Return the current user's home directory in this file system.
438   * The default implementation returns "/user/$USER/".
439   * 
440   * @return current user's home directory.
441   */
442  public Path getHomeDirectory() {
443    return new Path("/user/"+System.getProperty("user.name")).makeQualified(
444                                                                getUri(), null);
445  }
446  
447  /**
448   * Return a set of server default configuration values.
449   * 
450   * @return server default configuration values
451   * 
452   * @throws IOException an I/O error occurred
453   */
454  public abstract FsServerDefaults getServerDefaults() throws IOException; 
455
456  /**
457   * Return the fully-qualified path of path f resolving the path
458   * through any internal symlinks or mount point
459   * @param p path to be resolved
460   * @return fully qualified path 
461   * @throws FileNotFoundException, AccessControlException, IOException
462   *         UnresolvedLinkException if symbolic link on path cannot be resolved
463   *          internally
464   */
465   public Path resolvePath(final Path p) throws FileNotFoundException,
466           UnresolvedLinkException, AccessControlException, IOException {
467     checkPath(p);
468     return getFileStatus(p).getPath(); // default impl is to return the path
469   }
470  
471  /**
472   * The specification of this method matches that of
473   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
474   * that the Path f must be fully qualified and the permission is absolute
475   * (i.e. umask has been applied).
476   */
477  public final FSDataOutputStream create(final Path f,
478      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
479      throws AccessControlException, FileAlreadyExistsException,
480      FileNotFoundException, ParentNotDirectoryException,
481      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
482    checkPath(f);
483    int bufferSize = -1;
484    short replication = -1;
485    long blockSize = -1;
486    int bytesPerChecksum = -1;
487    ChecksumOpt checksumOpt = null;
488    FsPermission permission = null;
489    Progressable progress = null;
490    Boolean createParent = null;
491 
492    for (CreateOpts iOpt : opts) {
493      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
494        if (blockSize != -1) {
495          throw new HadoopIllegalArgumentException(
496              "BlockSize option is set multiple times");
497        }
498        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
499      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
500        if (bufferSize != -1) {
501          throw new HadoopIllegalArgumentException(
502              "BufferSize option is set multiple times");
503        }
504        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
505      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
506        if (replication != -1) {
507          throw new HadoopIllegalArgumentException(
508              "ReplicationFactor option is set multiple times");
509        }
510        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
511      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
512        if (bytesPerChecksum != -1) {
513          throw new HadoopIllegalArgumentException(
514              "BytesPerChecksum option is set multiple times");
515        }
516        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
517      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
518        if (checksumOpt != null) {
519          throw new  HadoopIllegalArgumentException(
520              "CreateChecksumType option is set multiple times");
521        }
522        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
523      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
524        if (permission != null) {
525          throw new HadoopIllegalArgumentException(
526              "Perms option is set multiple times");
527        }
528        permission = ((CreateOpts.Perms) iOpt).getValue();
529      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
530        if (progress != null) {
531          throw new HadoopIllegalArgumentException(
532              "Progress option is set multiple times");
533        }
534        progress = ((CreateOpts.Progress) iOpt).getValue();
535      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
536        if (createParent != null) {
537          throw new HadoopIllegalArgumentException(
538              "CreateParent option is set multiple times");
539        }
540        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
541      } else {
542        throw new HadoopIllegalArgumentException("Unkown CreateOpts of type " +
543            iOpt.getClass().getName());
544      }
545    }
546    if (permission == null) {
547      throw new HadoopIllegalArgumentException("no permission supplied");
548    }
549
550
551    FsServerDefaults ssDef = getServerDefaults();
552    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
553      throw new IOException("Internal error: default blockSize is" + 
554          " not a multiple of default bytesPerChecksum ");
555    }
556    
557    if (blockSize == -1) {
558      blockSize = ssDef.getBlockSize();
559    }
560
561    // Create a checksum option honoring user input as much as possible.
562    // If bytesPerChecksum is specified, it will override the one set in
563    // checksumOpt. Any missing value will be filled in using the default.
564    ChecksumOpt defaultOpt = new ChecksumOpt(
565        ssDef.getChecksumType(),
566        ssDef.getBytesPerChecksum());
567    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
568        checksumOpt, bytesPerChecksum);
569
570    if (bufferSize == -1) {
571      bufferSize = ssDef.getFileBufferSize();
572    }
573    if (replication == -1) {
574      replication = ssDef.getReplication();
575    }
576    if (createParent == null) {
577      createParent = false;
578    }
579
580    if (blockSize % bytesPerChecksum != 0) {
581      throw new HadoopIllegalArgumentException(
582             "blockSize should be a multiple of checksumsize");
583    }
584
585    return this.createInternal(f, createFlag, permission, bufferSize,
586      replication, blockSize, progress, checksumOpt, createParent);
587  }
588
589  /**
590   * The specification of this method matches that of
591   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
592   * have been declared explicitly.
593   */
594  public abstract FSDataOutputStream createInternal(Path f,
595      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
596      int bufferSize, short replication, long blockSize, Progressable progress,
597      ChecksumOpt checksumOpt, boolean createParent)
598      throws AccessControlException, FileAlreadyExistsException,
599      FileNotFoundException, ParentNotDirectoryException,
600      UnsupportedFileSystemException, UnresolvedLinkException, IOException;
601
602  /**
603   * The specification of this method matches that of
604   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
605   * f must be fully qualified and the permission is absolute (i.e. 
606   * umask has been applied).
607   */
608  public abstract void mkdir(final Path dir, final FsPermission permission,
609      final boolean createParent) throws AccessControlException,
610      FileAlreadyExistsException, FileNotFoundException,
611      UnresolvedLinkException, IOException;
612
613  /**
614   * The specification of this method matches that of
615   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
616   * this file system.
617   */
618  public abstract boolean delete(final Path f, final boolean recursive)
619      throws AccessControlException, FileNotFoundException,
620      UnresolvedLinkException, IOException;
621
622  /**
623   * The specification of this method matches that of
624   * {@link FileContext#open(Path)} except that Path f must be for this
625   * file system.
626   */
627  public FSDataInputStream open(final Path f) throws AccessControlException,
628      FileNotFoundException, UnresolvedLinkException, IOException {
629    return open(f, getServerDefaults().getFileBufferSize());
630  }
631
632  /**
633   * The specification of this method matches that of
634   * {@link FileContext#open(Path, int)} except that Path f must be for this
635   * file system.
636   */
637  public abstract FSDataInputStream open(final Path f, int bufferSize)
638      throws AccessControlException, FileNotFoundException,
639      UnresolvedLinkException, IOException;
640
641  /**
642   * The specification of this method matches that of
643   * {@link FileContext#truncate(Path, long)} except that Path f must be for
644   * this file system.
645   */
646  public boolean truncate(Path f, long newLength)
647      throws AccessControlException, FileNotFoundException,
648      UnresolvedLinkException, IOException {
649    throw new UnsupportedOperationException(getClass().getSimpleName()
650        + " doesn't support truncate");
651  }
652
653  /**
654   * The specification of this method matches that of
655   * {@link FileContext#setReplication(Path, short)} except that Path f must be
656   * for this file system.
657   */
658  public abstract boolean setReplication(final Path f,
659      final short replication) throws AccessControlException,
660      FileNotFoundException, UnresolvedLinkException, IOException;
661
662  /**
663   * The specification of this method matches that of
664   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
665   * f must be for this file system.
666   */
667  public final void rename(final Path src, final Path dst,
668      final Options.Rename... options) throws AccessControlException,
669      FileAlreadyExistsException, FileNotFoundException,
670      ParentNotDirectoryException, UnresolvedLinkException, IOException {
671    boolean overwrite = false;
672    if (null != options) {
673      for (Rename option : options) {
674        if (option == Rename.OVERWRITE) {
675          overwrite = true;
676        }
677      }
678    }
679    renameInternal(src, dst, overwrite);
680  }
681  
682  /**
683   * The specification of this method matches that of
684   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
685   * f must be for this file system and NO OVERWRITE is performed.
686   * 
687   * File systems that do not have a built in overwrite need implement only this
688   * method and can take advantage of the default impl of the other
689   * {@link #renameInternal(Path, Path, boolean)}
690   */
691  public abstract void renameInternal(final Path src, final Path dst)
692      throws AccessControlException, FileAlreadyExistsException,
693      FileNotFoundException, ParentNotDirectoryException,
694      UnresolvedLinkException, IOException;
695  
696  /**
697   * The specification of this method matches that of
698   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
699   * f must be for this file system.
700   */
701  public void renameInternal(final Path src, final Path dst,
702      boolean overwrite) throws AccessControlException,
703      FileAlreadyExistsException, FileNotFoundException,
704      ParentNotDirectoryException, UnresolvedLinkException, IOException {
705    // Default implementation deals with overwrite in a non-atomic way
706    final FileStatus srcStatus = getFileLinkStatus(src);
707
708    FileStatus dstStatus;
709    try {
710      dstStatus = getFileLinkStatus(dst);
711    } catch (IOException e) {
712      dstStatus = null;
713    }
714    if (dstStatus != null) {
715      if (dst.equals(src)) {
716        throw new FileAlreadyExistsException(
717            "The source "+src+" and destination "+dst+" are the same");
718      }
719      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
720        throw new FileAlreadyExistsException(
721            "Cannot rename symlink "+src+" to its target "+dst);
722      }
723      // It's OK to rename a file to a symlink and vice versa
724      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
725        throw new IOException("Source " + src + " and destination " + dst
726            + " must both be directories");
727      }
728      if (!overwrite) {
729        throw new FileAlreadyExistsException("Rename destination " + dst
730            + " already exists.");
731      }
732      // Delete the destination that is a file or an empty directory
733      if (dstStatus.isDirectory()) {
734        RemoteIterator<FileStatus> list = listStatusIterator(dst);
735        if (list != null && list.hasNext()) {
736          throw new IOException(
737              "Rename cannot overwrite non empty destination directory " + dst);
738        }
739      }
740      delete(dst, false);
741    } else {
742      final Path parent = dst.getParent();
743      final FileStatus parentStatus = getFileStatus(parent);
744      if (parentStatus.isFile()) {
745        throw new ParentNotDirectoryException("Rename destination parent "
746            + parent + " is a file.");
747      }
748    }
749    renameInternal(src, dst);
750  }
751  
752  /**
753   * Returns true if the file system supports symlinks, false otherwise.
754   * @return true if filesystem supports symlinks
755   */
756  public boolean supportsSymlinks() {
757    return false;
758  }
759  
760  /**
761   * The specification of this method matches that of  
762   * {@link FileContext#createSymlink(Path, Path, boolean)};
763   */
764  public void createSymlink(final Path target, final Path link,
765      final boolean createParent) throws IOException, UnresolvedLinkException {
766    throw new IOException("File system does not support symlinks");    
767  }
768
769  /**
770   * Partially resolves the path. This is used during symlink resolution in
771   * {@link FSLinkResolver}, and differs from the similarly named method
772   * {@link FileContext#getLinkTarget(Path)}.
773   * @throws IOException subclass implementations may throw IOException 
774   */
775  public Path getLinkTarget(final Path f) throws IOException {
776    throw new AssertionError("Implementation Error: " + getClass()
777        + " that threw an UnresolvedLinkException, causing this method to be"
778        + " called, needs to override this method.");
779  }
780    
781  /**
782   * The specification of this method matches that of
783   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
784   * must be for this file system.
785   */
786  public abstract void setPermission(final Path f,
787      final FsPermission permission) throws AccessControlException,
788      FileNotFoundException, UnresolvedLinkException, IOException;
789
790  /**
791   * The specification of this method matches that of
792   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
793   * be for this file system.
794   */
795  public abstract void setOwner(final Path f, final String username,
796      final String groupname) throws AccessControlException,
797      FileNotFoundException, UnresolvedLinkException, IOException;
798
799  /**
800   * The specification of this method matches that of
801   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
802   * for this file system.
803   */
804  public abstract void setTimes(final Path f, final long mtime,
805    final long atime) throws AccessControlException, FileNotFoundException,
806      UnresolvedLinkException, IOException;
807
808  /**
809   * The specification of this method matches that of
810   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
811   * this file system.
812   */
813  public abstract FileChecksum getFileChecksum(final Path f)
814      throws AccessControlException, FileNotFoundException,
815      UnresolvedLinkException, IOException;
816  
817  /**
818   * The specification of this method matches that of
819   * {@link FileContext#getFileStatus(Path)} 
820   * except that an UnresolvedLinkException may be thrown if a symlink is 
821   * encountered in the path.
822   */
823  public abstract FileStatus getFileStatus(final Path f)
824      throws AccessControlException, FileNotFoundException,
825      UnresolvedLinkException, IOException;
826
827  /**
828   * The specification of this method matches that of
829   * {@link FileContext#access(Path, FsAction)}
830   * except that an UnresolvedLinkException may be thrown if a symlink is
831   * encountered in the path.
832   */
833  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
834  public void access(Path path, FsAction mode) throws AccessControlException,
835      FileNotFoundException, UnresolvedLinkException, IOException {
836    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
837  }
838
839  /**
840   * The specification of this method matches that of
841   * {@link FileContext#getFileLinkStatus(Path)}
842   * except that an UnresolvedLinkException may be thrown if a symlink is  
843   * encountered in the path leading up to the final path component.
844   * If the file system does not support symlinks then the behavior is
845   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
846   */
847  public FileStatus getFileLinkStatus(final Path f)
848      throws AccessControlException, FileNotFoundException,
849      UnsupportedFileSystemException, IOException {
850    return getFileStatus(f);
851  }
852
853  /**
854   * The specification of this method matches that of
855   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
856   * Path f must be for this file system.
857   */
858  public abstract BlockLocation[] getFileBlockLocations(final Path f,
859      final long start, final long len) throws AccessControlException,
860      FileNotFoundException, UnresolvedLinkException, IOException;
861
862  /**
863   * The specification of this method matches that of
864   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
865   * file system.
866   */
867  public FsStatus getFsStatus(final Path f) throws AccessControlException,
868      FileNotFoundException, UnresolvedLinkException, IOException {
869    // default impl gets FsStatus of root
870    return getFsStatus();
871  }
872  
873  /**
874   * The specification of this method matches that of
875   * {@link FileContext#getFsStatus(Path)}.
876   */
877  public abstract FsStatus getFsStatus() throws AccessControlException,
878      FileNotFoundException, IOException;
879
880  /**
881   * The specification of this method matches that of
882   * {@link FileContext#listStatus(Path)} except that Path f must be for this
883   * file system.
884   */
885  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
886      throws AccessControlException, FileNotFoundException,
887      UnresolvedLinkException, IOException {
888    return new RemoteIterator<FileStatus>() {
889      private int i = 0;
890      private FileStatus[] statusList = listStatus(f);
891      
892      @Override
893      public boolean hasNext() {
894        return i < statusList.length;
895      }
896      
897      @Override
898      public FileStatus next() {
899        if (!hasNext()) {
900          throw new NoSuchElementException();
901        }
902        return statusList[i++];
903      }
904    };
905  }
906
907  /**
908   * The specification of this method matches that of
909   * {@link FileContext#listLocatedStatus(Path)} except that Path f 
910   * must be for this file system.
911   */
912  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
913      throws AccessControlException, FileNotFoundException,
914      UnresolvedLinkException, IOException {
915    return new RemoteIterator<LocatedFileStatus>() {
916      private RemoteIterator<FileStatus> itor = listStatusIterator(f);
917      
918      @Override
919      public boolean hasNext() throws IOException {
920        return itor.hasNext();
921      }
922      
923      @Override
924      public LocatedFileStatus next() throws IOException {
925        if (!hasNext()) {
926          throw new NoSuchElementException("No more entry in " + f);
927        }
928        FileStatus result = itor.next();
929        BlockLocation[] locs = null;
930        if (result.isFile()) {
931          locs = getFileBlockLocations(
932              result.getPath(), 0, result.getLen());
933        }
934        return new LocatedFileStatus(result, locs);
935      }
936    };
937  }
938
939  /**
940   * The specification of this method matches that of
941   * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
942   * for this file system.
943   */
944  public abstract FileStatus[] listStatus(final Path f)
945      throws AccessControlException, FileNotFoundException,
946      UnresolvedLinkException, IOException;
947
948  /**
949   * @return an iterator over the corrupt files under the given path
950   * (may contain duplicates if a file has more than one corrupt block)
951   * @throws IOException
952   */
953  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
954    throws IOException {
955    throw new UnsupportedOperationException(getClass().getCanonicalName() +
956                                            " does not support" +
957                                            " listCorruptFileBlocks");
958  }
959
960  /**
961   * The specification of this method matches that of
962   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
963   * must be for this file system.
964   */
965  public abstract void setVerifyChecksum(final boolean verifyChecksum)
966      throws AccessControlException, IOException;
967  
968  /**
969   * Get a canonical name for this file system.
970   * @return a URI string that uniquely identifies this file system
971   */
972  public String getCanonicalServiceName() {
973    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
974  }
975  
976  /**
977   * Get one or more delegation tokens associated with the filesystem. Normally
978   * a file system returns a single delegation token. A file system that manages
979   * multiple file systems underneath, could return set of delegation tokens for
980   * all the file systems it manages
981   * 
982   * @param renewer the account name that is allowed to renew the token.
983   * @return List of delegation tokens.
984   *   If delegation tokens not supported then return a list of size zero.
985   * @throws IOException
986   */
987  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
988  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
989    return new ArrayList<Token<?>>(0);
990  }
991
992  /**
993   * Modifies ACL entries of files and directories.  This method can add new ACL
994   * entries or modify the permissions on existing ACL entries.  All existing
995   * ACL entries that are not specified in this call are retained without
996   * changes.  (Modifications are merged into the current ACL.)
997   *
998   * @param path Path to modify
999   * @param aclSpec List<AclEntry> describing modifications
1000   * @throws IOException if an ACL could not be modified
1001   */
1002  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
1003      throws IOException {
1004    throw new UnsupportedOperationException(getClass().getSimpleName()
1005        + " doesn't support modifyAclEntries");
1006  }
1007
1008  /**
1009   * Removes ACL entries from files and directories.  Other ACL entries are
1010   * retained.
1011   *
1012   * @param path Path to modify
1013   * @param aclSpec List<AclEntry> describing entries to remove
1014   * @throws IOException if an ACL could not be modified
1015   */
1016  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
1017      throws IOException {
1018    throw new UnsupportedOperationException(getClass().getSimpleName()
1019        + " doesn't support removeAclEntries");
1020  }
1021
1022  /**
1023   * Removes all default ACL entries from files and directories.
1024   *
1025   * @param path Path to modify
1026   * @throws IOException if an ACL could not be modified
1027   */
1028  public void removeDefaultAcl(Path path)
1029      throws IOException {
1030    throw new UnsupportedOperationException(getClass().getSimpleName()
1031        + " doesn't support removeDefaultAcl");
1032  }
1033
1034  /**
1035   * Removes all but the base ACL entries of files and directories.  The entries
1036   * for user, group, and others are retained for compatibility with permission
1037   * bits.
1038   *
1039   * @param path Path to modify
1040   * @throws IOException if an ACL could not be removed
1041   */
1042  public void removeAcl(Path path)
1043      throws IOException {
1044    throw new UnsupportedOperationException(getClass().getSimpleName()
1045        + " doesn't support removeAcl");
1046  }
1047
1048  /**
1049   * Fully replaces ACL of files and directories, discarding all existing
1050   * entries.
1051   *
1052   * @param path Path to modify
1053   * @param aclSpec List<AclEntry> describing modifications, must include entries
1054   *   for user, group, and others for compatibility with permission bits.
1055   * @throws IOException if an ACL could not be modified
1056   */
1057  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
1058    throw new UnsupportedOperationException(getClass().getSimpleName()
1059        + " doesn't support setAcl");
1060  }
1061
1062  /**
1063   * Gets the ACLs of files and directories.
1064   *
1065   * @param path Path to get
1066   * @return RemoteIterator<AclStatus> which returns each AclStatus
1067   * @throws IOException if an ACL could not be read
1068   */
1069  public AclStatus getAclStatus(Path path) throws IOException {
1070    throw new UnsupportedOperationException(getClass().getSimpleName()
1071        + " doesn't support getAclStatus");
1072  }
1073
1074  /**
1075   * Set an xattr of a file or directory.
1076   * The name must be prefixed with the namespace followed by ".". For example,
1077   * "user.attr".
1078   * <p/>
1079   * Refer to the HDFS extended attributes user documentation for details.
1080   *
1081   * @param path Path to modify
1082   * @param name xattr name.
1083   * @param value xattr value.
1084   * @throws IOException
1085   */
1086  public void setXAttr(Path path, String name, byte[] value)
1087      throws IOException {
1088    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
1089        XAttrSetFlag.REPLACE));
1090  }
1091
1092  /**
1093   * Set an xattr of a file or directory.
1094   * The name must be prefixed with the namespace followed by ".". For example,
1095   * "user.attr".
1096   * <p/>
1097   * Refer to the HDFS extended attributes user documentation for details.
1098   *
1099   * @param path Path to modify
1100   * @param name xattr name.
1101   * @param value xattr value.
1102   * @param flag xattr set flag
1103   * @throws IOException
1104   */
1105  public void setXAttr(Path path, String name, byte[] value,
1106      EnumSet<XAttrSetFlag> flag) throws IOException {
1107    throw new UnsupportedOperationException(getClass().getSimpleName()
1108        + " doesn't support setXAttr");
1109  }
1110
1111  /**
1112   * Get an xattr for a file or directory.
1113   * The name must be prefixed with the namespace followed by ".". For example,
1114   * "user.attr".
1115   * <p/>
1116   * Refer to the HDFS extended attributes user documentation for details.
1117   *
1118   * @param path Path to get extended attribute
1119   * @param name xattr name.
1120   * @return byte[] xattr value.
1121   * @throws IOException
1122   */
1123  public byte[] getXAttr(Path path, String name) throws IOException {
1124    throw new UnsupportedOperationException(getClass().getSimpleName()
1125        + " doesn't support getXAttr");
1126  }
1127
1128  /**
1129   * Get all of the xattrs for a file or directory.
1130   * Only those xattrs for which the logged-in user has permissions to view
1131   * are returned.
1132   * <p/>
1133   * Refer to the HDFS extended attributes user documentation for details.
1134   *
1135   * @param path Path to get extended attributes
1136   * @return Map<String, byte[]> describing the XAttrs of the file or directory
1137   * @throws IOException
1138   */
1139  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
1140    throw new UnsupportedOperationException(getClass().getSimpleName()
1141        + " doesn't support getXAttrs");
1142  }
1143
1144  /**
1145   * Get all of the xattrs for a file or directory.
1146   * Only those xattrs for which the logged-in user has permissions to view
1147   * are returned.
1148   * <p/>
1149   * Refer to the HDFS extended attributes user documentation for details.
1150   *
1151   * @param path Path to get extended attributes
1152   * @param names XAttr names.
1153   * @return Map<String, byte[]> describing the XAttrs of the file or directory
1154   * @throws IOException
1155   */
1156  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
1157      throws IOException {
1158    throw new UnsupportedOperationException(getClass().getSimpleName()
1159        + " doesn't support getXAttrs");
1160  }
1161
1162  /**
1163   * Get all of the xattr names for a file or directory.
1164   * Only the xattr names for which the logged-in user has permissions to view
1165   * are returned.
1166   * <p/>
1167   * Refer to the HDFS extended attributes user documentation for details.
1168   *
1169   * @param path Path to get extended attributes
1170   * @return Map<String, byte[]> describing the XAttrs of the file or directory
1171   * @throws IOException
1172   */
1173  public List<String> listXAttrs(Path path)
1174          throws IOException {
1175    throw new UnsupportedOperationException(getClass().getSimpleName()
1176            + " doesn't support listXAttrs");
1177  }
1178
1179  /**
1180   * Remove an xattr of a file or directory.
1181   * The name must be prefixed with the namespace followed by ".". For example,
1182   * "user.attr".
1183   * <p/>
1184   * Refer to the HDFS extended attributes user documentation for details.
1185   *
1186   * @param path Path to remove extended attribute
1187   * @param name xattr name
1188   * @throws IOException
1189   */
1190  public void removeXAttr(Path path, String name) throws IOException {
1191    throw new UnsupportedOperationException(getClass().getSimpleName()
1192        + " doesn't support removeXAttr");
1193  }
1194
1195  /**
1196   * The specification of this method matches that of
1197   * {@link FileContext#createSnapshot(Path, String)}.
1198   */
1199  public Path createSnapshot(final Path path, final String snapshotName)
1200      throws IOException {
1201    throw new UnsupportedOperationException(getClass().getSimpleName()
1202        + " doesn't support createSnapshot");
1203  }
1204
1205  /**
1206   * The specification of this method matches that of
1207   * {@link FileContext#renameSnapshot(Path, String, String)}.
1208   */
1209  public void renameSnapshot(final Path path, final String snapshotOldName,
1210      final String snapshotNewName) throws IOException {
1211    throw new UnsupportedOperationException(getClass().getSimpleName()
1212        + " doesn't support renameSnapshot");
1213  }
1214
1215  /**
1216   * The specification of this method matches that of
1217   * {@link FileContext#deleteSnapshot(Path, String)}.
1218   */
1219  public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
1220      throws IOException {
1221    throw new UnsupportedOperationException(getClass().getSimpleName()
1222        + " doesn't support deleteSnapshot");
1223  }
1224
1225  /**
1226   * Set the storage policy for a given file or directory.
1227   *
1228   * @param path file or directory path.
1229   * @param policyName the name of the target storage policy. The list
1230   *                   of supported Storage policies can be retrieved
1231   *                   via {@link #getAllStoragePolicies}.
1232   */
1233  public void setStoragePolicy(final Path path, final String policyName)
1234      throws IOException {
1235    throw new UnsupportedOperationException(getClass().getSimpleName()
1236        + " doesn't support setStoragePolicy");
1237  }
1238
1239  /**
1240   * Retrieve the storage policy for a given file or directory.
1241   *
1242   * @param src file or directory path.
1243   * @return storage policy for give file.
1244   * @throws IOException
1245   */
1246  public BlockStoragePolicySpi getStoragePolicy(final Path src)
1247      throws IOException {
1248    throw new UnsupportedOperationException(getClass().getSimpleName()
1249        + " doesn't support getStoragePolicy");
1250  }
1251
1252  /**
1253   * Retrieve all the storage policies supported by this file system.
1254   *
1255   * @return all storage policies supported by this filesystem.
1256   * @throws IOException
1257   */
1258  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
1259      throws IOException {
1260    throw new UnsupportedOperationException(getClass().getSimpleName()
1261        + " doesn't support getAllStoragePolicies");
1262  }
1263
1264  @Override //Object
1265  public int hashCode() {
1266    return myUri.hashCode();
1267  }
1268  
1269  @Override //Object
1270  public boolean equals(Object other) {
1271    if (other == null || !(other instanceof AbstractFileSystem)) {
1272      return false;
1273    }
1274    return myUri.equals(((AbstractFileSystem) other).myUri);
1275  }
1276}