/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.AccessControlException;

/**
 * The public API for performing administrative functions on HDFS. Those
 * writing applications against HDFS should prefer this interface to directly
 * accessing functionality in DistributedFileSystem or DFSClient.
 *
 * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
 * provides the functionality for the {@code hdfs dfsadmin ...} CLI commands.
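 *
 * <p>A minimal usage sketch (the URI and path below are illustrative, not
 * required values):
 *
 * <pre>{@code
 * Configuration conf = new Configuration();
 * // The URI must refer to an HDFS file system; otherwise the constructor
 * // throws IllegalArgumentException.
 * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
 * admin.allowSnapshot(new Path("/data/projects"));
 * }</pre>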
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {

  private DistributedFileSystem dfs;
  private static final FsPermission TRASH_PERMISSION = new FsPermission(
      FsAction.ALL, FsAction.ALL, FsAction.ALL, true);

  /**
   * Create a new HdfsAdmin client.
   *
   * @param uri the unique URI of the HDFS file system to administer
   * @param conf configuration
   * @throws IOException in the event the file system could not be created
   */
  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
    } else {
      dfs = (DistributedFileSystem) fs;
    }
  }

  /**
   * Set the namespace quota (count of files, directories, and sym links) for
   * a directory.
   *
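   * <p>For example (the path and limits are illustrative):
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * // Allow at most one million names (files, directories, and sym links)
   * // under /data/projects.
   * admin.setQuota(new Path("/data/projects"), 1000000L);
   * // Independently cap the storage space consumed under the same directory.
   * admin.setSpaceQuota(new Path("/data/projects"), 10L * 1024 * 1024 * 1024);
   * }</pre>
   *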
   * @param src the path to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuota(Path src, long quota) throws IOException {
    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Clear the namespace quota (count of files, directories and sym links) for
   * a directory.
   *
   * @param src the path to clear the quota of
   * @throws IOException in the event of error
   */
  public void clearQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Set the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to set the space quota of
   * @param spaceQuota the value to set for the space quota
   * @throws IOException in the event of error
   */
  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
  }

  /**
   * Clear the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to clear the space quota of
   * @throws IOException in the event of error
   */
  public void clearSpaceQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Set the quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
   *
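   * <p>A short sketch (the path and quota value are illustrative):
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * // Limit the SSD storage consumed under /data/hot to 1 TB.
   * admin.setQuotaByStorageType(new Path("/data/hot"), StorageType.SSD,
   *     1024L * 1024 * 1024 * 1024);
   * }</pre>
   *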
   * @param src the target directory to set the quota on, by storage type
   * @param type the storage type to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuotaByStorageType(Path src, StorageType type, long quota)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, quota);
  }

  /**
   * Clear the space quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
   *
   * @param src the target directory to clear the quota on, by storage type
   * @param type the storage type to clear the quota for
   * @throws IOException in the event of error
   */
  public void clearQuotaByStorageType(Path src, StorageType type)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Allow snapshot on a directory.
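   *
   * <p>For example (the path and snapshot name are illustrative); snapshots
   * themselves are created through the {@link FileSystem} API:
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * admin.allowSnapshot(new Path("/data/projects"));
   * FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
   * fs.createSnapshot(new Path("/data/projects"), "daily-backup");
   * }</pre>
   *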
   * @param path The path of the directory where snapshots will be taken.
   * @throws IOException in the event of error
   */
  public void allowSnapshot(Path path) throws IOException {
    dfs.allowSnapshot(path);
  }

  /**
   * Disallow snapshot on a directory.
   * @param path The path of the snapshottable directory.
   * @throws IOException in the event of error
   */
  public void disallowSnapshot(Path path) throws IOException {
    dfs.disallowSnapshot(path);
  }

  /**
   * Add a new CacheDirectiveInfo.
   *
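   * <p>A minimal sketch (pool name, path and replication are illustrative);
   * the cache pool must exist before a directive can refer to it:
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * admin.addCachePool(new CachePoolInfo("hot-tables"));
   * CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
   *     .setPath(new Path("/warehouse/hot_table"))
   *     .setPool("hot-tables")
   *     .setReplication((short) 2)
   *     .build();
   * long id = admin.addCacheDirective(directive,
   *     EnumSet.noneOf(CacheFlag.class));
   * }</pre>
   *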
   * @param info Information about a directive to add.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @return the ID of the directive that was created.
   * @throws IOException if the directive could not be added
   */
  public long addCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    return dfs.addCacheDirective(info, flags);
  }

  /**
   * Modify a CacheDirective.
   *
   * @param info Information about the directive to modify. You must set the ID
   *          to indicate which CacheDirective you want to modify.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @throws IOException if the directive could not be modified
   */
  public void modifyCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    dfs.modifyCacheDirective(info, flags);
  }

  /**
   * Remove a CacheDirective.
   *
   * @param id identifier of the CacheDirectiveInfo to remove
   * @throws IOException if the directive could not be removed
   */
  public void removeCacheDirective(long id)
      throws IOException {
    dfs.removeCacheDirective(id);
  }

  /**
   * List cache directives. Incrementally fetches results from the server.
   *
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to us.
   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
   * @throws IOException if the directives could not be listed
   */
  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
      CacheDirectiveInfo filter) throws IOException {
    return dfs.listCacheDirectives(filter);
  }

  /**
   * Add a cache pool.
   *
   * @param info
   *          The request to add a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void addCachePool(CachePoolInfo info) throws IOException {
    dfs.addCachePool(info);
  }

  /**
   * Modify an existing cache pool.
   *
   * @param info
   *          The request to modify a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    dfs.modifyCachePool(info);
  }

  /**
   * Remove a cache pool.
   *
   * @param poolName
   *          Name of the cache pool to remove.
   * @throws IOException
   *          if the cache pool did not exist, or could not be removed.
   */
  public void removeCachePool(String poolName) throws IOException {
    dfs.removeCachePool(poolName);
  }

  /**
   * List all cache pools.
   *
   * @return A remote iterator from which you can get CachePoolEntry objects.
   *          Requests will be made as needed.
   * @throws IOException
   *          If there was an error listing cache pools.
   */
  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    return dfs.listCachePools();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   * @deprecated use {@link #createEncryptionZone(Path, String, EnumSet)}
   *             instead
   */
  @Deprecated
  public void createEncryptionZone(Path path, String keyName)
      throws IOException, AccessControlException, FileNotFoundException {
    dfs.createEncryptionZone(path, keyName);
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
   * Additional options, such as provisioning the trash directory, can be
   * specified using {@link CreateEncryptionZoneFlag} flags.
   *
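   * <p>A sketch of creating a zone with a provisioned trash directory (the
   * path and key name are illustrative; the key must already exist in the
   * configured KeyProvider):
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * admin.createEncryptionZone(new Path("/secure/zone1"), "zone1-key",
   *     EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
   * }</pre>
   *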
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @param flags   flags for this operation.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   * @throws HadoopIllegalArgumentException if the flags are invalid
   */
  public void createEncryptionZone(Path path, String keyName,
      EnumSet<CreateEncryptionZoneFlag> flags)
      throws IOException, AccessControlException, FileNotFoundException,
      HadoopIllegalArgumentException {
    // Reject contradictory flags before the zone is created.
    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)
        && flags.contains(CreateEncryptionZoneFlag.NO_TRASH)) {
      throw new HadoopIllegalArgumentException(
          "can not have both PROVISION_TRASH and NO_TRASH flags");
    }
    dfs.createEncryptionZone(path, keyName);
    if (flags.contains(CreateEncryptionZoneFlag.PROVISION_TRASH)) {
      this.provisionEZTrash(path);
    }
  }

  /**
   * Provision a trash directory for a given encryption zone.
   *
   * @param path the root of the encryption zone
   * @throws IOException if the trash directory can not be created.
   */
  public void provisionEncryptionZoneTrash(Path path) throws IOException {
    this.provisionEZTrash(path);
  }

  /**
   * Get the path of the encryption zone for a given file or directory.
   *
   * @param path The path to get the encryption zone for.
   *
   * @return The EncryptionZone, or null if the path is not in an encryption
   *          zone.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public EncryptionZone getEncryptionZoneForPath(Path path)
      throws IOException, AccessControlException, FileNotFoundException {
    return dfs.getEZForPath(path);
  }

  /**
   * Returns a RemoteIterator which can be used to list the encryption zones
   * in HDFS. For large numbers of encryption zones, the iterator will fetch
   * the list of zones in a number of small batches.
   * <p>
   * Since the list is fetched in batches, it does not represent a
   * consistent snapshot of the entire list of encryption zones.
   * <p>
   * This method can only be called by HDFS superusers.
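   *
   * <p>For example, assuming an {@code HdfsAdmin} instance named
   * {@code admin}:
   *
   * <pre>{@code
   * RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
   * while (it.hasNext()) {
   *   EncryptionZone zone = it.next();
   *   System.out.println(zone.getPath() + " -> " + zone.getKeyName());
   * }
   * }</pre>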
   *
   * @return a RemoteIterator over the encryption zones
   * @throws IOException if the encryption zones could not be listed
   */
  public RemoteIterator<EncryptionZone> listEncryptionZones()
      throws IOException {
    return dfs.listEncryptionZones();
  }

  /**
   * Exposes a stream of namesystem events. Only events occurring after the
   * stream is created are available.
   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
   * for information on stream usage.
   * See {@link org.apache.hadoop.hdfs.inotify.Event}
   * for information on the available events.
   * <p>
   * Inotify users may want to tune the following HDFS parameters to
   * ensure that enough extra HDFS edits are saved to support inotify clients
   * that fall behind the current state of the namespace while reading events.
   * The default parameter values should generally be reasonable. If edits are
   * deleted before their corresponding events can be read, clients will see a
   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
   *
   * It should generally be sufficient to tune these parameters:
   * dfs.namenode.num.extra.edits.retained
   * dfs.namenode.max.extra.edits.segments.retained
   *
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e. that do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
   * dfs.namenode.checkpoint.period
   * dfs.namenode.checkpoint.txns
   * dfs.namenode.num.checkpoints.retained
   * dfs.ha.log-roll.period
   * <p>
   * It is recommended that local journaling be configured
   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
   * so that edit transfers from the shared journal can be avoided.
   *
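   * <p>A polling sketch, assuming an {@code HdfsAdmin} instance named
   * {@code admin}; {@code EventBatch} and {@code Event} are in
   * {@code org.apache.hadoop.hdfs.inotify}:
   *
   * <pre>{@code
   * DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
   * while (true) {
   *   EventBatch batch = stream.take();  // blocks until events are available
   *   for (Event event : batch.getEvents()) {
   *     System.out.println(event.getEventType());
   *   }
   * }
   * }</pre>
   *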
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }

  /**
   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
   * have access to an FSImage inclusive of lastReadTxid) and only want to read
   * events after this point.
   */
  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
      throws IOException {
    return dfs.getInotifyEventStream(lastReadTxid);
  }

  /**
   * Set the storage policy of the specified source path (a file or a
   * directory).
   *
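   * <p>For example (the path and policy name are illustrative; policy names
   * such as {@code "HOT"} and {@code "COLD"} are defined by the cluster):
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * admin.setStoragePolicy(new Path("/data/archive"), "COLD");
   * }</pre>
   *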
   * @param src The source path referring to either a directory or a file.
   * @param policyName The name of the storage policy.
   * @throws IOException in the event of error
   */
  public void setStoragePolicy(final Path src, final String policyName)
      throws IOException {
    dfs.setStoragePolicy(src, policyName);
  }

  /**
   * Set the erasure coding policy of the specified source directory.
   *
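   * <p>A short sketch (the path is illustrative); passing {@code null}
   * selects the default erasure coding policy:
   *
   * <pre>{@code
   * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
   * admin.setErasureCodingPolicy(new Path("/data/cold"), null);
   * }</pre>
   *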
   * @param path The source path referring to a directory.
   * @param ecPolicy The erasure coding policy for the directory.
   *                 If null, the default will be used.
   * @throws IOException in the event of error
   */
  public void setErasureCodingPolicy(final Path path,
      final ErasureCodingPolicy ecPolicy) throws IOException {
    dfs.setErasureCodingPolicy(path, ecPolicy);
  }

  /**
   * Get the erasure coding policy information for the specified path.
   *
   * @param path the path to query
   * @return the policy information if the file or directory on the path is
   *          erasure coded, null otherwise
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
      throws IOException {
    return dfs.getErasureCodingPolicy(path);
  }

  /**
   * Get the supported erasure coding policies.
   *
   * @return the supported erasure coding policies
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
    return dfs.getClient().getErasureCodingPolicies();
  }

  private void provisionEZTrash(Path path) throws IOException {
    // make sure the path is an EZ
    EncryptionZone ez = dfs.getEZForPath(path);
    if (ez == null) {
      throw new IllegalArgumentException(path + " is not an encryption zone.");
    }

    // and that it is the root of the zone, not a descendant directory
    String ezPath = ez.getPath();
    if (!path.toString().equals(ezPath)) {
      throw new IllegalArgumentException(path + " is not the root of an " +
          "encryption zone. Do you mean " + ez.getPath() + "?");
    }

    // refuse to provision if the trash directory already exists
    Path trashPath = new Path(ez.getPath(), FileSystem.TRASH_PREFIX);
    if (dfs.exists(trashPath)) {
      String errMessage = "Will not provision new trash directory for " +
          "encryption zone " + ez.getPath() + ". Path already exists.";
      FileStatus trashFileStatus = dfs.getFileStatus(trashPath);
      if (!trashFileStatus.isDirectory()) {
        errMessage += "\r\n" +
            "Warning: " + trashPath.toString() + " is not a directory";
      }
      if (!trashFileStatus.getPermission().equals(TRASH_PERMISSION)) {
        errMessage += "\r\n" +
            "Warning: the permission of " +
            trashPath.toString() + " is not " + TRASH_PERMISSION;
      }
      throw new IOException(errMessage);
    }

    // Create the trash directory, then set the exact permission bits
    // (including the sticky bit), since mkdir applies the configured umask.
    dfs.mkdir(trashPath, TRASH_PERMISSION);
    dfs.setPermission(trashPath, TRASH_PERMISSION);
  }

}