/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.AccessControlException;

/**
 * The public API for performing administrative functions on HDFS. Those writing
 * applications against HDFS should prefer this interface to directly accessing
 * functionality in DistributedFileSystem or DFSClient.
 *
 * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
 * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
 * commands.
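 * <p/>
 * A minimal usage sketch (illustrative only; the URI and path are
 * placeholders, and error handling is omitted):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * // Assumes the URI below points at an HDFS NameNode.
 * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
 * // Limit /projects/foo to at most one million namespace objects.
 * admin.setQuota(new Path("/projects/foo"), 1000000L);
 * }</pre>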
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {

  private DistributedFileSystem dfs;

  /**
   * Create a new HdfsAdmin client.
   *
   * @param uri the unique URI of the HDFS file system to administer
   * @param conf configuration
   * @throws IOException in the event the file system could not be created
   */
  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
    } else {
      dfs = (DistributedFileSystem)fs;
    }
  }

  /**
   * Set the namespace quota (count of files, directories, and sym links) for a
   * directory.
   *
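   * For example, to cap a directory at one million namespace objects
   * (a sketch; the path and limit are placeholders):
   * <pre>{@code
   * admin.setQuota(new Path("/projects/foo"), 1000000L);
   * // Later, remove the limit again.
   * admin.clearQuota(new Path("/projects/foo"));
   * }</pre>
   *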
   * @param src the path to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuota(Path src, long quota) throws IOException {
    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Clear the namespace quota (count of files, directories and sym links) for a
   * directory.
   *
   * @param src the path to clear the quota of
   * @throws IOException in the event of error
   */
  public void clearQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Set the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to set the space quota of
   * @param spaceQuota the value to set for the space quota
   * @throws IOException in the event of error
   */
  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
  }

  /**
   * Clear the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to clear the space quota of
   * @throws IOException in the event of error
   */
  public void clearSpaceQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Set the quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
   *
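   * For example, to limit how much SSD storage a directory may consume
   * (a sketch; the path and size are placeholders):
   * <pre>{@code
   * admin.setQuotaByStorageType(new Path("/projects/foo"),
   *     StorageType.SSD, 10L * 1024 * 1024 * 1024); // 10 GB of SSD
   * }</pre>
   *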
   * @param src the target directory to set the quota by storage type
   * @param type the storage type to set for quota by storage type
   * @param quota the value to set for quota by storage type
   * @throws IOException in the event of error
   */
  public void setQuotaByStorageType(Path src, StorageType type, long quota)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, quota);
  }

  /**
   * Clear the space quota by storage type for a directory. Note that
   * directories and sym links do not occupy storage type quota.
   *
   * @param src the target directory to clear the quota by storage type
   * @param type the storage type to clear for quota by storage type
   * @throws IOException in the event of error
   */
  public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Allow snapshot on a directory.
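   * <p/>
   * For example (a sketch; the path and snapshot name are placeholders):
   * <pre>{@code
   * admin.allowSnapshot(new Path("/projects/foo"));
   * // Snapshots can then be taken through the FileSystem API, e.g.:
   * // fs.createSnapshot(new Path("/projects/foo"), "snap1");
   * }</pre>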
   * @param path The path of the directory where snapshots will be taken.
   */
  public void allowSnapshot(Path path) throws IOException {
    dfs.allowSnapshot(path);
  }

  /**
   * Disallow snapshot on a directory.
   * @param path The path of the snapshottable directory.
   */
  public void disallowSnapshot(Path path) throws IOException {
    dfs.disallowSnapshot(path);
  }

  /**
   * Add a new CacheDirectiveInfo.
   *
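   * For example, to ask the NameNode to cache a directory in an existing
   * cache pool (a sketch; the path and pool name are placeholders):
   * <pre>{@code
   * CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
   *     .setPath(new Path("/projects/foo"))
   *     .setPool("foo-pool")
   *     .build();
   * long id = admin.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
   * }</pre>
   *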
   * @param info Information about a directive to add.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @return the ID of the directive that was created.
   * @throws IOException if the directive could not be added
   */
  public long addCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    return dfs.addCacheDirective(info, flags);
  }

  /**
   * Modify a CacheDirective.
   *
   * @param info Information about the directive to modify. You must set the ID
   *          to indicate which CacheDirective you want to modify.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @throws IOException if the directive could not be modified
   */
  public void modifyCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    dfs.modifyCacheDirective(info, flags);
  }

  /**
   * Remove a CacheDirective.
   *
   * @param id identifier of the CacheDirectiveInfo to remove
   * @throws IOException if the directive could not be removed
   */
  public void removeCacheDirective(long id)
      throws IOException {
    dfs.removeCacheDirective(id);
  }

  /**
   * List cache directives. Incrementally fetches results from the server.
   *
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to us.
   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
   */
  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
      CacheDirectiveInfo filter) throws IOException {
    return dfs.listCacheDirectives(filter);
  }

  /**
   * Add a cache pool.
   *
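   * For example (a sketch; the pool name, owner, and limit are placeholders):
   * <pre>{@code
   * CachePoolInfo pool = new CachePoolInfo("foo-pool")
   *     .setOwnerName("foo")
   *     .setLimit(1024L * 1024 * 1024); // cache at most 1 GB for this pool
   * admin.addCachePool(pool);
   * }</pre>
   *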
   * @param info
   *          The request to add a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void addCachePool(CachePoolInfo info) throws IOException {
    dfs.addCachePool(info);
  }

  /**
   * Modify an existing cache pool.
   *
   * @param info
   *          The request to modify a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    dfs.modifyCachePool(info);
  }

  /**
   * Remove a cache pool.
   *
   * @param poolName
   *          Name of the cache pool to remove.
   * @throws IOException
   *          if the cache pool did not exist, or could not be removed.
   */
  public void removeCachePool(String poolName) throws IOException {
    dfs.removeCachePool(poolName);
  }

  /**
   * List all cache pools.
   *
   * @return A remote iterator from which you can get CachePoolEntry objects.
   *          Requests will be made as needed.
   * @throws IOException
   *          If there was an error listing cache pools.
   */
  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    return dfs.listCachePools();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
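   * For example (a sketch; assumes a key named "fooKey" has already been
   * created in the cluster's configured KeyProvider, and that "/secure" is an
   * empty directory):
   * <pre>{@code
   * admin.createEncryptionZone(new Path("/secure"), "fooKey");
   * }</pre>
   *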
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public void createEncryptionZone(Path path, String keyName)
    throws IOException, AccessControlException, FileNotFoundException {
    dfs.createEncryptionZone(path, keyName);
  }

  /**
   * Get the encryption zone information for a given file or directory.
   *
   * @param path The path to get the encryption zone for.
   *
   * @return The EncryptionZone, or null if the path is not in an encryption
   *          zone.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public EncryptionZone getEncryptionZoneForPath(Path path)
    throws IOException, AccessControlException, FileNotFoundException {
    return dfs.getEZForPath(path);
  }

  /**
   * Returns a RemoteIterator which can be used to list the encryption zones
   * in HDFS. For large numbers of encryption zones, the iterator will fetch
   * the list of zones in a number of small batches.
   * <p/>
   * Since the list is fetched in batches, it does not represent a
   * consistent snapshot of the entire list of encryption zones.
   * <p/>
   * This method can only be called by HDFS superusers.
   */
  public RemoteIterator<EncryptionZone> listEncryptionZones()
      throws IOException {
    return dfs.listEncryptionZones();
  }

  /**
   * Exposes a stream of namesystem events. Only events occurring after the
   * stream is created are available.
   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
   * for information on stream usage.
   * See {@link org.apache.hadoop.hdfs.inotify.Event}
   * for information on the available events.
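   * <p/>
   * A minimal polling loop might look like the following (illustrative only;
   * error handling is omitted):
   * <pre>{@code
   * DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
   * while (true) {
   *   EventBatch batch = stream.take(); // blocks until events are available
   *   for (Event event : batch.getEvents()) {
   *     System.out.println(event.getEventType() + " at txid " + batch.getTxid());
   *   }
   * }
   * }</pre>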
   * <p/>
   * Inotify users may want to tune the following HDFS parameters to
   * ensure that enough extra HDFS edits are saved to support inotify clients
   * that fall behind the current state of the namespace while reading events.
   * The default parameter values should generally be reasonable. If edits are
   * deleted before their corresponding events can be read, clients will see a
   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
   *
   * It should generally be sufficient to tune these parameters:
   * dfs.namenode.num.extra.edits.retained
   * dfs.namenode.max.extra.edits.segments.retained
   *
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e. they do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
   * dfs.namenode.checkpoint.period
   * dfs.namenode.checkpoint.txns
   * dfs.namenode.num.checkpoints.retained
   * dfs.ha.log-roll.period
   * <p/>
   * It is recommended that local journaling be configured
   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
   * so that edit transfers from the shared journal can be avoided.
   *
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }

  /**
   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
   * have access to an FSImage inclusive of lastReadTxid) and only want to read
   * events after this point.
   */
  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
      throws IOException {
    return dfs.getInotifyEventStream(lastReadTxid);
  }

  /**
   * Set the specified storage policy on the source path.
   *
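   * For example (a sketch; the policy name must be one of the storage
   * policies defined on the cluster, e.g. "HOT" or "COLD"):
   * <pre>{@code
   * admin.setStoragePolicy(new Path("/archive/foo"), "COLD");
   * }</pre>
   *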
   * @param src The source path referring to either a directory or a file.
   * @param policyName The name of the storage policy.
   */
  public void setStoragePolicy(final Path src, final String policyName)
      throws IOException {
    dfs.setStoragePolicy(src, policyName);
  }

  /**
   * Set the specified erasure coding policy on the source path (a directory).
   *
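   * For example, to apply one of the policies the cluster supports
   * (a sketch; the policy name is a placeholder):
   * <pre>{@code
   * ErasureCodingPolicy chosen = null;
   * for (ErasureCodingPolicy p : admin.getErasureCodingPolicies()) {
   *   if (p.getName().equals("RS-6-3-64k")) {
   *     chosen = p;
   *   }
   * }
   * admin.setErasureCodingPolicy(new Path("/ec/data"), chosen);
   * }</pre>
   *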
   * @param path The source path referring to a directory.
   * @param ecPolicy The erasure coding policy for the directory.
   *                 If null, the default will be used.
   * @throws IOException in the event of error
   */
  public void setErasureCodingPolicy(final Path path,
      final ErasureCodingPolicy ecPolicy) throws IOException {
    dfs.setErasureCodingPolicy(path, ecPolicy);
  }

  /**
   * Get the erasure coding policy information for the specified path.
   *
   * @param path The path of the file or directory.
   * @return The policy information if the file or directory on the path is
   *          erasure coded, null otherwise.
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy getErasureCodingPolicy(final Path path)
      throws IOException {
    return dfs.getErasureCodingPolicy(path);
  }

  /**
   * Get the erasure coding policies supported by the cluster.
   *
   * @return The supported erasure coding policies.
   * @throws IOException in the event of error
   */
  public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
    return dfs.getClient().getErasureCodingPolicies();
  }
}