/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.AccessControlException;

/**
 * The public API for performing administrative functions on HDFS. Those writing
 * applications against HDFS should prefer this interface to directly accessing
 * functionality in DistributedFileSystem or DFSClient.
 *
 * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
 * is a class that provides the functionality for the CLI "hdfs dfsadmin ..."
 * commands.
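 *
 * <p>For example, a minimal sketch of typical use (the URI and path below are
 * illustrative; any HDFS URI reachable with the supplied configuration works):
 * <pre>{@code
 * Configuration conf = new Configuration();
 * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
 * admin.allowSnapshot(new Path("/user/alice"));
 * admin.setSpaceQuota(new Path("/user/alice"), 10L * 1024 * 1024 * 1024);
 * }</pre>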
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {

  private DistributedFileSystem dfs;

  /**
   * Create a new HdfsAdmin client.
   *
   * @param uri the unique URI of the HDFS file system to administer
   * @param conf configuration
   * @throws IOException in the event the file system could not be created
   */
  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
    } else {
      dfs = (DistributedFileSystem)fs;
    }
  }

  /**
   * Set the namespace quota (count of files, directories, and sym links) for a
   * directory.
   *
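   * <p>A minimal sketch of pairing a namespace quota with a space quota
   * ({@code admin} is an {@link HdfsAdmin} instance and the path is
   * illustrative; space quotas count raw bytes, including replication):
   * <pre>{@code
   * Path dir = new Path("/projects/alpha");
   * admin.setQuota(dir, 10000);                            // at most 10,000 names
   * admin.setSpaceQuota(dir, 1024L * 1024 * 1024 * 1024);  // at most 1 TB on disk
   * }</pre>
   *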
   * @param src the path to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuota(Path src, long quota) throws IOException {
    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Clear the namespace quota (count of files, directories, and sym links) for a
   * directory.
   *
   * @param src the path to clear the quota of
   * @throws IOException in the event of error
   */
  public void clearQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Set the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to set the space quota of
   * @param spaceQuota the value to set for the space quota
   * @throws IOException in the event of error
   */
  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
  }

  /**
   * Clear the storage space quota (size of files) for a directory. Note that
   * directories and sym links do not occupy storage space.
   *
   * @param src the path to clear the space quota of
   * @throws IOException in the event of error
   */
  public void clearSpaceQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Set the quota by storage type for a directory. Note that directories and
   * sym links do not count against storage type quotas.
   *
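   * <p>A minimal sketch ({@code admin} is an {@link HdfsAdmin} instance and the
   * path is illustrative); quotas for other storage types are unaffected:
   * <pre>{@code
   * // Cap SSD usage under /hot at 100 GB.
   * admin.setQuotaByStorageType(new Path("/hot"), StorageType.SSD,
   *     100L * 1024 * 1024 * 1024);
   * }</pre>
   *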
   * @param src the target directory whose storage type quota is set
   * @param type the storage type the quota applies to
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuotaByStorageType(Path src, StorageType type, long quota)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, quota);
  }

  /**
   * Clear the space quota by storage type for a directory. Note that
   * directories and sym links do not count against storage type quotas.
   *
   * @param src the target directory whose storage type quota is cleared
   * @param type the storage type whose quota is cleared
   * @throws IOException in the event of error
   */
  public void clearQuotaByStorageType(Path src, StorageType type)
      throws IOException {
    dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Allow snapshot on a directory.
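   * <p>A minimal sketch (the path is illustrative); once snapshots are allowed,
   * they can be taken with {@link FileSystem#createSnapshot(Path, String)} or
   * the {@code hdfs dfs -createSnapshot} command:
   * <pre>{@code
   * admin.allowSnapshot(new Path("/user/alice"));
   * }</pre>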
   * @param path The path of the directory where snapshots will be taken.
   * @throws IOException in the event of error
   */
  public void allowSnapshot(Path path) throws IOException {
    dfs.allowSnapshot(path);
  }

  /**
   * Disallow snapshot on a directory.
   * @param path The path of the snapshottable directory.
   * @throws IOException in the event of error
   */
  public void disallowSnapshot(Path path) throws IOException {
    dfs.disallowSnapshot(path);
  }

  /**
   * Add a new CacheDirectiveInfo.
   *
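   * <p>A minimal sketch ({@code admin} is an {@link HdfsAdmin} instance; the
   * pool name and path are illustrative, and the pool must already exist):
   * <pre>{@code
   * CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
   *     .setPath(new Path("/warehouse/dim_tables"))
   *     .setPool("analytics")
   *     .setReplication((short) 2)
   *     .build();
   * long id = admin.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
   * }</pre>
   *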
   * @param info Information about a directive to add.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @return the ID of the directive that was created.
   * @throws IOException if the directive could not be added
   */
  public long addCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    return dfs.addCacheDirective(info, flags);
  }

  /**
   * Modify a CacheDirective.
   *
   * @param info Information about the directive to modify. You must set the ID
   *          to indicate which CacheDirective you want to modify.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @throws IOException if the directive could not be modified
   */
  public void modifyCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    dfs.modifyCacheDirective(info, flags);
  }

  /**
   * Remove a CacheDirective.
   *
   * @param id identifier of the CacheDirectiveInfo to remove
   * @throws IOException if the directive could not be removed
   */
  public void removeCacheDirective(long id)
      throws IOException {
    dfs.removeCacheDirective(id);
  }

  /**
   * List cache directives. Incrementally fetches results from the server.
   *
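   * <p>A minimal sketch of iterating over the results ({@code admin} is an
   * {@link HdfsAdmin} instance):
   * <pre>{@code
   * RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
   * while (it.hasNext()) {
   *   CacheDirectiveEntry entry = it.next();
   *   System.out.println(entry.getInfo().getPath());
   * }
   * }</pre>
   *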
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to us.
   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
   * @throws IOException if the directives could not be listed
   */
  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
      CacheDirectiveInfo filter) throws IOException {
    return dfs.listCacheDirectives(filter);
  }

  /**
   * Add a cache pool.
   *
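   * <p>A minimal sketch (the pool name, owner, group, and limit are
   * illustrative):
   * <pre>{@code
   * CachePoolInfo pool = new CachePoolInfo("analytics")
   *     .setOwnerName("etl")
   *     .setGroupName("hadoop")
   *     .setLimit(10L * 1024 * 1024 * 1024);
   * admin.addCachePool(pool);
   * }</pre>
   *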
   * @param info
   *          The request to add a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void addCachePool(CachePoolInfo info) throws IOException {
    dfs.addCachePool(info);
  }

  /**
   * Modify an existing cache pool.
   *
   * @param info
   *          The request to modify a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    dfs.modifyCachePool(info);
  }

  /**
   * Remove a cache pool.
   *
   * @param poolName
   *          Name of the cache pool to remove.
   * @throws IOException
   *          if the cache pool did not exist, or could not be removed.
   */
  public void removeCachePool(String poolName) throws IOException {
    dfs.removeCachePool(poolName);
  }

  /**
   * List all cache pools.
   *
   * @return A remote iterator from which you can get CachePoolEntry objects.
   *          Requests will be made as needed.
   * @throws IOException
   *          If there was an error listing cache pools.
   */
  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    return dfs.listCachePools();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
   *
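   * <p>A minimal sketch (the path and key name are illustrative; the key must
   * already exist in the cluster's KeyProvider, e.g. created with
   * {@code hadoop key create warehouseKey}):
   * <pre>{@code
   * admin.createEncryptionZone(new Path("/warehouse/secure"), "warehouseKey");
   * }</pre>
   *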
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public void createEncryptionZone(Path path, String keyName)
    throws IOException, AccessControlException, FileNotFoundException {
    dfs.createEncryptionZone(path, keyName);
  }

  /**
   * Get the encryption zone for a given file or directory.
   *
   * @param path The path to get the encryption zone for.
   *
   * @return The EncryptionZone the path is in, or null if the path is not in
   *         an encryption zone.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public EncryptionZone getEncryptionZoneForPath(Path path)
    throws IOException, AccessControlException, FileNotFoundException {
    return dfs.getEZForPath(path);
  }

  /**
   * Returns a RemoteIterator which can be used to list the encryption zones
   * in HDFS. For large numbers of encryption zones, the iterator will fetch
   * the list of zones in a number of small batches.
   * <p/>
   * Since the list is fetched in batches, it does not represent a
   * consistent snapshot of the entire list of encryption zones.
   * <p/>
   * This method can only be called by HDFS superusers.
   *
   * @return a RemoteIterator over the encryption zones
   * @throws IOException if the encryption zones could not be listed
   */
  public RemoteIterator<EncryptionZone> listEncryptionZones()
      throws IOException {
    return dfs.listEncryptionZones();
  }

  /**
   * Exposes a stream of namesystem events. Only events occurring after the
   * stream is created are available.
   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
   * for information on stream usage.
   * See {@link org.apache.hadoop.hdfs.inotify.Event}
   * for information on the available events.
   * <p/>
   * Inotify users may want to tune the following HDFS parameters to
   * ensure that enough extra HDFS edits are saved to support inotify clients
   * that fall behind the current state of the namespace while reading events.
   * The default parameter values should generally be reasonable. If edits are
   * deleted before their corresponding events can be read, clients will see a
   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
   *
   * It should generally be sufficient to tune these parameters:
   * dfs.namenode.num.extra.edits.retained
   * dfs.namenode.max.extra.edits.segments.retained
   *
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e. that do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
   * dfs.namenode.checkpoint.period
   * dfs.namenode.checkpoint.txns
   * dfs.namenode.num.checkpoints.retained
   * dfs.ha.log-roll.period
   * <p/>
   * It is recommended that local journaling be configured
   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
   * so that edit transfers from the shared journal can be avoided.
   *
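   * <p>A minimal sketch of consuming the stream (the batch-oriented calls shown
   * here match releases where {@code take()} returns an
   * {@link org.apache.hadoop.hdfs.inotify.EventBatch}; consult
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} for the exact
   * poll/take signatures in your release):
   * <pre>{@code
   * DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
   * EventBatch batch = stream.take();   // blocks until events are available
   * for (Event event : batch.getEvents()) {
   *   if (event.getEventType() == Event.EventType.CREATE) {
   *     // react to a newly created file or directory
   *   }
   * }
   * }</pre>
   *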
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }

  /**
   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
   * have access to an FSImage inclusive of lastReadTxid) and only want to read
   * events after this point.
   *
   * @param lastReadTxid the transaction ID of the last edit the caller has
   *          already seen; only events after this point are returned
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
      throws IOException {
    return dfs.getInotifyEventStream(lastReadTxid);
  }

  /**
   * Set the storage policy for a given file or directory.
   *
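   * <p>A minimal sketch (the path is illustrative; {@code "COLD"} is one of the
   * built-in policies such as {@code "HOT"}, {@code "WARM"}, and {@code "COLD"}):
   * <pre>{@code
   * admin.setStoragePolicy(new Path("/archive/2014"), "COLD");
   * }</pre>
   *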
   * @param src The source path referring to either a directory or a file.
   * @param policyName The name of the storage policy.
   * @throws IOException in the event of error
   */
  public void setStoragePolicy(final Path src, final String policyName)
      throws IOException {
    dfs.setStoragePolicy(src, policyName);
  }
}