001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018
019package org.apache.hadoop.hdfs;
020
021import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
022import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
023import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
024import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
025import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
029import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
030import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
031import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
032import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
033import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
034import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
035import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
036import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
037import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
038import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
039
040import java.io.IOException;
041import java.io.PrintStream;
042import java.io.UnsupportedEncodingException;
043import java.net.InetAddress;
044import java.net.InetSocketAddress;
045import java.net.URI;
046import java.net.URISyntaxException;
047import java.nio.charset.StandardCharsets;
048import java.security.SecureRandom;
049import java.text.SimpleDateFormat;
050import java.util.Arrays;
051import java.util.Collection;
052import java.util.Collections;
053import java.util.Comparator;
054import java.util.Date;
055import java.util.HashSet;
056import java.util.List;
057import java.util.Locale;
058import java.util.Map;
059import java.util.Random;
060import java.util.Set;
061
062import javax.net.SocketFactory;
063
064import com.google.common.collect.Sets;
065import org.apache.commons.cli.CommandLine;
066import org.apache.commons.cli.CommandLineParser;
067import org.apache.commons.cli.Option;
068import org.apache.commons.cli.Options;
069import org.apache.commons.cli.ParseException;
070import org.apache.commons.cli.PosixParser;
071import org.apache.commons.logging.Log;
072import org.apache.commons.logging.LogFactory;
073import org.apache.hadoop.HadoopIllegalArgumentException;
074import org.apache.hadoop.classification.InterfaceAudience;
075import org.apache.hadoop.conf.Configuration;
076import org.apache.hadoop.crypto.key.KeyProvider;
077import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
078import org.apache.hadoop.crypto.key.KeyProviderFactory;
079import org.apache.hadoop.fs.BlockLocation;
080import org.apache.hadoop.fs.CommonConfigurationKeys;
081import org.apache.hadoop.fs.FileSystem;
082import org.apache.hadoop.fs.Path;
083import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
084import org.apache.hadoop.hdfs.protocol.DatanodeID;
085import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
086import org.apache.hadoop.hdfs.protocol.HdfsConstants;
087import org.apache.hadoop.hdfs.protocol.LocatedBlock;
088import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
089import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
090import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
091import org.apache.hadoop.hdfs.server.namenode.NameNode;
092import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
093import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
094import org.apache.hadoop.http.HttpConfig;
095import org.apache.hadoop.http.HttpServer2;
096import org.apache.hadoop.ipc.ProtobufRpcEngine;
097import org.apache.hadoop.ipc.RPC;
098import org.apache.hadoop.net.NetUtils;
099import org.apache.hadoop.net.NodeBase;
100import org.apache.hadoop.security.SecurityUtil;
101import org.apache.hadoop.security.UserGroupInformation;
102import org.apache.hadoop.security.authorize.AccessControlList;
103import org.apache.hadoop.util.StringUtils;
104import org.apache.hadoop.util.ToolRunner;
105
106import com.google.common.annotations.VisibleForTesting;
107import com.google.common.base.Joiner;
108import com.google.common.base.Preconditions;
109import com.google.common.collect.Lists;
110import com.google.common.collect.Maps;
111import com.google.common.primitives.SignedBytes;
112import com.google.protobuf.BlockingService;
113
114@InterfaceAudience.Private
115public class DFSUtil {
  /** Shared logger for this utility class. */
  public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
  
  /** Canonical zero-length byte array, reused to avoid repeated allocation. */
  public static final byte[] EMPTY_BYTES = {};
119
120  /** Compare two byte arrays by lexicographical order. */
121  public static int compareBytes(byte[] left, byte[] right) {
122    if (left == null) {
123      left = EMPTY_BYTES;
124    }
125    if (right == null) {
126      right = EMPTY_BYTES;
127    }
128    return SignedBytes.lexicographicalComparator().compare(left, right);
129  }
130
  private DFSUtil() { /* Hidden constructor */ }
  // One Random per thread: avoids lock contention on a shared instance.
  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };
  
  // One SecureRandom per thread, for callers needing unpredictable values.
  private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
    @Override
    protected SecureRandom initialValue() {
      return new SecureRandom();
    }
  };
145
  /** @return this thread's pseudo random number generator. */
  public static Random getRandom() {
    return RANDOM.get();
  }
  
  /** @return this thread's secure random number generator. */
  public static SecureRandom getSecureRandom() {
    return SECURE_RANDOM.get();
  }
155
156  /** Shuffle the elements in the given array. */
157  public static <T> T[] shuffle(final T[] array) {
158    if (array != null && array.length > 0) {
159      final Random random = getRandom();
160      for (int n = array.length; n > 1; ) {
161        final int randomIndex = random.nextInt(n);
162        n--;
163        if (n != randomIndex) {
164          final T tmp = array[randomIndex];
165          array[randomIndex] = array[n];
166          array[n] = tmp;
167        }
168      }
169    }
170    return array;
171  }
172
173  /**
174   * Compartor for sorting DataNodeInfo[] based on decommissioned states.
175   * Decommissioned nodes are moved to the end of the array on sorting with
176   * this compartor.
177   */
178  public static final Comparator<DatanodeInfo> DECOM_COMPARATOR = 
179    new Comparator<DatanodeInfo>() {
180      @Override
181      public int compare(DatanodeInfo a, DatanodeInfo b) {
182        return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
183          a.isDecommissioned() ? 1 : -1;
184      }
185    };
186
187
188  /**
189   * Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
190   * Decommissioned/stale nodes are moved to the end of the array on sorting
191   * with this comparator.
192   */ 
193  @InterfaceAudience.Private 
194  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
195    private final long staleInterval;
196
197    /**
198     * Constructor of DecomStaleComparator
199     * 
200     * @param interval
201     *          The time interval for marking datanodes as stale is passed from
202     *          outside, since the interval may be changed dynamically
203     */
204    public DecomStaleComparator(long interval) {
205      this.staleInterval = interval;
206    }
207
208    @Override
209    public int compare(DatanodeInfo a, DatanodeInfo b) {
210      // Decommissioned nodes will still be moved to the end of the list
211      if (a.isDecommissioned()) {
212        return b.isDecommissioned() ? 0 : 1;
213      } else if (b.isDecommissioned()) {
214        return -1;
215      }
216      // Stale nodes will be moved behind the normal nodes
217      boolean aStale = a.isStale(staleInterval);
218      boolean bStale = b.isStale(staleInterval);
219      return aStale == bStale ? 0 : (aStale ? 1 : -1);
220    }
221  }    
222    
223  /**
224   * Address matcher for matching an address to local address
225   */
226  static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
227    @Override
228    public boolean match(InetSocketAddress s) {
229      return NetUtils.isLocalAddress(s.getAddress());
230    };
231  };
232  
233  /**
234   * Whether the pathname is valid.  Currently prohibits relative paths, 
235   * names which contain a ":" or "//", or other non-canonical paths.
236   */
237  public static boolean isValidName(String src) {
238    // Path must be absolute.
239    if (!src.startsWith(Path.SEPARATOR)) {
240      return false;
241    }
242      
243    // Check for ".." "." ":" "/"
244    String[] components = StringUtils.split(src, '/');
245    for (int i = 0; i < components.length; i++) {
246      String element = components[i];
247      if (element.equals(".")  ||
248          (element.indexOf(":") >= 0)  ||
249          (element.indexOf("/") >= 0)) {
250        return false;
251      }
252      // ".." is allowed in path starting with /.reserved/.inodes
253      if (element.equals("..")) {
254        if (components.length > 4
255            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
256            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
257          continue;
258        }
259        return false;
260      }
261      // The string may start or end with a /, but not have
262      // "//" in the middle.
263      if (element.isEmpty() && i != components.length - 1 &&
264          i != 0) {
265        return false;
266      }
267    }
268    return true;
269  }
270
271  /**
272   * Checks if a string is a valid path component. For instance, components
273   * cannot contain a ":" or "/", and cannot be equal to a reserved component
274   * like ".snapshot".
275   * <p>
276   * The primary use of this method is for validating paths when loading the
277   * FSImage. During normal NN operation, paths are sometimes allowed to
278   * contain reserved components.
279   * 
280   * @return If component is valid
281   */
282  public static boolean isValidNameForComponent(String component) {
283    if (component.equals(".") ||
284        component.equals("..") ||
285        component.indexOf(":") >= 0 ||
286        component.indexOf("/") >= 0) {
287      return false;
288    }
289    return !isReservedPathComponent(component);
290  }
291
292
293  /**
294   * Returns if the component is reserved.
295   * 
296   * <p>
297   * Note that some components are only reserved under certain directories, e.g.
298   * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
299   * @return true, if the component is reserved
300   */
301  public static boolean isReservedPathComponent(String component) {
302    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
303      if (component.equals(reserved)) {
304        return true;
305      }
306    }
307    return false;
308  }
309
310  /**
311   * Converts a byte array to a string using UTF8 encoding.
312   */
313  public static String bytes2String(byte[] bytes) {
314    return bytes2String(bytes, 0, bytes.length);
315  }
316
  // Using the charset canonical name ("UTF-8") for String/byte[] conversions
  // is much more efficient due to use of cached encoders/decoders.
  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
320
321  /**
322   * Decode a specific range of bytes of the given byte array to a string
323   * using UTF8.
324   * 
325   * @param bytes The bytes to be decoded into characters
326   * @param offset The index of the first byte to decode
327   * @param length The number of bytes to decode
328   * @return The decoded string
329   */
330  public static String bytes2String(byte[] bytes, int offset, int length) {
331    try {
332      return new String(bytes, offset, length, UTF8_CSN);
333    } catch (UnsupportedEncodingException e) {
334      // should never happen!
335      throw new IllegalArgumentException("UTF8 encoding is not supported", e);
336    }
337  }
338
339  /**
340   * Converts a string to a byte array using UTF8 encoding.
341   */
342  public static byte[] string2Bytes(String str) {
343    try {
344      return str.getBytes(UTF8_CSN);
345    } catch (UnsupportedEncodingException e) {
346      // should never happen!
347      throw new IllegalArgumentException("UTF8 decoding is not supported", e);
348    }
349
350  }
351
352  /**
353   * Given a list of path components returns a path as a UTF8 String
354   */
355  public static String byteArray2PathString(final byte[][] components,
356      final int offset, final int length) {
357    // specifically not using StringBuilder to more efficiently build
358    // string w/o excessive byte[] copies and charset conversions.
359    final int range = offset + length;
360    Preconditions.checkPositionIndexes(offset, range, components.length);
361    if (length == 0) {
362      return "";
363    }
364    // absolute paths start with either null or empty byte[]
365    byte[] firstComponent = components[offset];
366    boolean isAbsolute = (offset == 0 &&
367        (firstComponent == null || firstComponent.length == 0));
368    if (offset == 0 && length == 1) {
369      return isAbsolute ? Path.SEPARATOR : bytes2String(firstComponent);
370    }
371    // compute length of full byte[], seed with 1st component and delimiters
372    int pos = isAbsolute ? 0 : firstComponent.length;
373    int size = pos + length - 1;
374    for (int i=offset + 1; i < range; i++) {
375      size += components[i].length;
376    }
377    final byte[] result = new byte[size];
378    if (!isAbsolute) {
379      System.arraycopy(firstComponent, 0, result, 0, firstComponent.length);
380    }
381    // append remaining components as "/component".
382    for (int i=offset + 1; i < range; i++) {
383      result[pos++] = (byte)Path.SEPARATOR_CHAR;
384      int len = components[i].length;
385      System.arraycopy(components[i], 0, result, pos, len);
386      pos += len;
387    }
388    return bytes2String(result);
389  }
390
  /**
   * Joins all path components into a path string; see
   * {@link #byteArray2PathString(byte[][], int, int)}.
   */
  public static String byteArray2PathString(byte[][] pathComponents) {
    return byteArray2PathString(pathComponents, 0, pathComponents.length);
  }
394
395  /**
396   * Converts a list of path components into a path using Path.SEPARATOR.
397   * 
398   * @param components Path components
399   * @return Combined path as a UTF-8 string
400   */
401  public static String strings2PathString(String[] components) {
402    if (components.length == 0) {
403      return "";
404    }
405    if (components.length == 1) {
406      if (components[0] == null || components[0].isEmpty()) {
407        return Path.SEPARATOR;
408      }
409    }
410    return Joiner.on(Path.SEPARATOR).join(components);
411  }
412
413  /**
414   * Given a list of path components returns a byte array
415   */
416  public static byte[] byteArray2bytes(byte[][] pathComponents) {
417    if (pathComponents.length == 0) {
418      return EMPTY_BYTES;
419    } else if (pathComponents.length == 1
420        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
421      return new byte[]{(byte) Path.SEPARATOR_CHAR};
422    }
423    int length = 0;
424    for (int i = 0; i < pathComponents.length; i++) {
425      length += pathComponents[i].length;
426      if (i < pathComponents.length - 1) {
427        length++; // for SEPARATOR
428      }
429    }
430    byte[] path = new byte[length];
431    int index = 0;
432    for (int i = 0; i < pathComponents.length; i++) {
433      System.arraycopy(pathComponents[i], 0, path, index,
434          pathComponents[i].length);
435      index += pathComponents[i].length;
436      if (i < pathComponents.length - 1) {
437        path[index] = (byte) Path.SEPARATOR_CHAR;
438        index++;
439      }
440    }
441    return path;
442  }
443
444  /** Convert an object representing a path to a string. */
445  public static String path2String(final Object path) {
446    return path == null? null
447        : path instanceof String? (String)path
448        : path instanceof byte[][]? byteArray2PathString((byte[][])path)
449        : path.toString();
450  }
451
452  /**
453   * Convert a UTF8 string to an array of byte arrays.
454   */
455  public static byte[][] getPathComponents(String path) {
456    // avoid intermediate split to String[]
457    final byte[] bytes = string2Bytes(path);
458    return bytes2byteArray(bytes, bytes.length, (byte)Path.SEPARATOR_CHAR);
459  }
460
461  /**
462   * Splits the array of bytes into array of arrays of bytes
463   * on byte separator
464   * @param bytes the array of bytes to split
465   * @param separator the delimiting byte
466   */
467  public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
468    return bytes2byteArray(bytes, bytes.length, separator);
469  }
470
471  /**
472   * Splits first len bytes in bytes to array of arrays of bytes
473   * on byte separator
474   * @param bytes the byte array to split
475   * @param len the number of bytes to split
476   * @param separator the delimiting byte
477   */
478  public static byte[][] bytes2byteArray(byte[] bytes,
479                                         int len,
480                                         byte separator) {
481    Preconditions.checkPositionIndex(len, bytes.length);
482    if (len == 0) {
483      return new byte[][]{null};
484    }
485    // Count the splits. Omit multiple separators and the last one by
486    // peeking at prior byte.
487    int splits = 0;
488    for (int i = 1; i < len; i++) {
489      if (bytes[i-1] == separator && bytes[i] != separator) {
490        splits++;
491      }
492    }
493    if (splits == 0 && bytes[0] == separator) {
494      return new byte[][]{null};
495    }
496    splits++;
497    byte[][] result = new byte[splits][];
498    int nextIndex = 0;
499    // Build the splits.
500    for (int i = 0; i < splits; i++) {
501      int startIndex = nextIndex;
502      // find next separator in the bytes.
503      while (nextIndex < len && bytes[nextIndex] != separator) {
504        nextIndex++;
505      }
506      result[i] = (nextIndex > 0)
507          ? Arrays.copyOfRange(bytes, startIndex, nextIndex)
508          : EMPTY_BYTES; // reuse empty bytes for root.
509      do { // skip over separators.
510        nextIndex++;
511      } while (nextIndex < len && bytes[nextIndex] == separator);
512    }
513    return result;
514  }
515  
516  /**
517   * Convert a LocatedBlocks to BlockLocations[]
518   * @param blocks a LocatedBlocks
519   * @return an array of BlockLocations
520   */
521  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
522    if (blocks == null) {
523      return new BlockLocation[0];
524    }
525    return locatedBlocks2Locations(blocks.getLocatedBlocks());
526  }
527  
528  /**
529   * Convert a List<LocatedBlock> to BlockLocation[]
530   * @param blocks A List<LocatedBlock> to be converted
531   * @return converted array of BlockLocation
532   */
533  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
534    if (blocks == null) {
535      return new BlockLocation[0];
536    }
537    int nrBlocks = blocks.size();
538    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
539    if (nrBlocks == 0) {
540      return blkLocations;
541    }
542    int idx = 0;
543    for (LocatedBlock blk : blocks) {
544      assert idx < nrBlocks : "Incorrect index";
545      DatanodeInfo[] locations = blk.getLocations();
546      String[] hosts = new String[locations.length];
547      String[] xferAddrs = new String[locations.length];
548      String[] racks = new String[locations.length];
549      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
550        hosts[hCnt] = locations[hCnt].getHostName();
551        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
552        NodeBase node = new NodeBase(xferAddrs[hCnt], 
553                                     locations[hCnt].getNetworkLocation());
554        racks[hCnt] = node.toString();
555      }
556      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
557      String[] cachedHosts = new String[cachedLocations.length];
558      for (int i=0; i<cachedLocations.length; i++) {
559        cachedHosts[i] = cachedLocations[i].getHostName();
560      }
561      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
562                                            racks,
563                                            blk.getStartOffset(),
564                                            blk.getBlockSize(),
565                                            blk.isCorrupt());
566      idx++;
567    }
568    return blkLocations;
569  }
570
571  /**
572   * Returns collection of nameservice Ids from the configuration.
573   * @param conf configuration
574   * @return collection of nameservice Ids, or null if not specified
575   */
576  public static Collection<String> getNameServiceIds(Configuration conf) {
577    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
578  }
579
580  /**
581   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
582   * returns a list with a single null value.
583   */
584  private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
585    if (coll == null || coll.isEmpty()) {
586      return Collections.singletonList(null);
587    } else {
588      return coll;
589    }
590  }
591  
592  /**
593   * Namenode HighAvailability related configuration.
594   * Returns collection of namenode Ids from the configuration. One logical id
595   * for each namenode in the in the HA setup.
596   * 
597   * @param conf configuration
598   * @param nsId the nameservice ID to look at, or null for non-federated 
599   * @return collection of namenode Ids
600   */
601  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
602    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
603    return conf.getTrimmedStringCollection(key);
604  }
605  
606  /**
607   * Given a list of keys in the order of preference, returns a value
608   * for the key in the given order from the configuration.
609   * @param defaultValue default value to return, when key was not found
610   * @param keySuffix suffix to add to the key, if it is not null
611   * @param conf Configuration
612   * @param keys list of keys in the order of preference
613   * @return value of the key or default if a key was not found in configuration
614   */
615  private static String getConfValue(String defaultValue, String keySuffix,
616      Configuration conf, String... keys) {
617    String value = null;
618    for (String key : keys) {
619      key = addSuffix(key, keySuffix);
620      value = conf.get(key);
621      if (value != null) {
622        break;
623      }
624    }
625    if (value == null) {
626      value = defaultValue;
627    }
628    return value;
629  }
630  
631  /** Add non empty and non null suffix to a key */
632  private static String addSuffix(String key, String suffix) {
633    if (suffix == null || suffix.isEmpty()) {
634      return key;
635    }
636    assert !suffix.startsWith(".") :
637      "suffix '" + suffix + "' should not already have '.' prepended.";
638    return key + "." + suffix;
639  }
640  
641  /** Concatenate list of suffix strings '.' separated */
642  private static String concatSuffixes(String... suffixes) {
643    if (suffixes == null) {
644      return null;
645    }
646    return Joiner.on(".").skipNulls().join(suffixes);
647  }
648  
649  /**
650   * Return configuration key of format key.suffix1.suffix2...suffixN
651   */
652  public static String addKeySuffixes(String key, String... suffixes) {
653    String keySuffix = concatSuffixes(suffixes);
654    return addSuffix(key, keySuffix);
655  }
656
657  /**
658   * Returns the configured address for all NameNodes in the cluster.
659   * @param conf configuration
660   * @param defaultAddress default address to return in case key is not found.
661   * @param keys Set of keys to look for in the order of preference
662   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
663   */
664  private static Map<String, Map<String, InetSocketAddress>>
665    getAddresses(Configuration conf, String defaultAddress, String... keys) {
666    Collection<String> nameserviceIds = getNameServiceIds(conf);
667    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
668  }
669
670  /**
671   * Returns the configured address for all NameNodes in the cluster.
672   * @param conf configuration
673   * @param nsIds
674   *@param defaultAddress default address to return in case key is not found.
675   * @param keys Set of keys to look for in the order of preference   @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
676   */
677  private static Map<String, Map<String, InetSocketAddress>>
678    getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
679                         String defaultAddress, String... keys) {
680    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
681    // across all of the configured nameservices and namenodes.
682    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
683    for (String nsId : emptyAsSingletonNull(nsIds)) {
684      Map<String, InetSocketAddress> isas =
685        getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
686      if (!isas.isEmpty()) {
687        ret.put(nsId, isas);
688      }
689    }
690    return ret;
691  }
692  
693  /**
694   * Get all of the RPC addresses of the individual NNs in a given nameservice.
695   * 
696   * @param conf Configuration
697   * @param nsId the nameservice whose NNs addresses we want.
698   * @param defaultValue default address to return in case key is not found.
699   * @return A map from nnId -> RPC address of each NN in the nameservice.
700   */
701  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
702      Configuration conf, String nsId, String defaultValue) {
703    return getAddressesForNameserviceId(conf, nsId, defaultValue,
704        DFS_NAMENODE_RPC_ADDRESS_KEY);
705  }
706
707  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
708      Configuration conf, String nsId, String defaultValue,
709      String... keys) {
710    Collection<String> nnIds = getNameNodeIds(conf, nsId);
711    Map<String, InetSocketAddress> ret = Maps.newHashMap();
712    for (String nnId : emptyAsSingletonNull(nnIds)) {
713      String suffix = concatSuffixes(nsId, nnId);
714      String address = getConfValue(defaultValue, suffix, conf, keys);
715      if (address != null) {
716        InetSocketAddress isa = NetUtils.createSocketAddr(address);
717        if (isa.isUnresolved()) {
718          LOG.warn("Namenode for " + nsId +
719                   " remains unresolved for ID " + nnId +
720                   ".  Check your hdfs-site.xml file to " +
721                   "ensure namenodes are configured properly.");
722        }
723        ret.put(nnId, isa);
724      }
725    }
726    return ret;
727  }
728
729  /**
730   * @return a collection of all configured NN Kerberos principals.
731   */
732  public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
733    Set<String> principals = new HashSet<String>();
734    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
735      if (HAUtil.isHAEnabled(conf, nsId)) {
736        for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
737          Configuration confForNn = new Configuration(conf);
738          NameNode.initializeGenericKeys(confForNn, nsId, nnId);
739          String principal = SecurityUtil.getServerPrincipal(confForNn
740              .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
741              NameNode.getAddress(confForNn).getHostName());
742          principals.add(principal);
743        }
744      } else {
745        Configuration confForNn = new Configuration(conf);
746        NameNode.initializeGenericKeys(confForNn, nsId, null);
747        String principal = SecurityUtil.getServerPrincipal(confForNn
748            .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
749            NameNode.getAddress(confForNn).getHostName());
750        principals.add(principal);
751      }
752    }
753
754    return principals;
755  }
756
757  /**
758   * Returns list of InetSocketAddress corresponding to HA NN RPC addresses from
759   * the configuration.
760   * 
761   * @param conf configuration
762   * @return list of InetSocketAddresses
763   */
764  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
765      Configuration conf) {
766    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
767  }
768
769  /**
770   * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
771   * the configuration.
772   *
773   * @return list of InetSocketAddresses
774   */
775  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
776      Configuration conf, String scheme) {
777    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
778      return getAddresses(conf, null,
779          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
780    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
781      return getAddresses(conf, null,
782          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
783    } else {
784      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
785    }
786  }
787
788  /**
789   * Returns list of InetSocketAddress corresponding to  backup node rpc 
790   * addresses from the configuration.
791   * 
792   * @param conf configuration
793   * @return list of InetSocketAddresses
794   * @throws IOException on error
795   */
796  public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
797      Configuration conf) throws IOException {
798    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
799        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
800    if (addressList.isEmpty()) {
801      throw new IOException("Incorrect configuration: backup node address "
802          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
803    }
804    return addressList;
805  }
806
807  /**
808   * Returns list of InetSocketAddresses of corresponding to secondary namenode
809   * http addresses from the configuration.
810   * 
811   * @param conf configuration
812   * @return list of InetSocketAddresses
813   * @throws IOException on error
814   */
815  public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
816      Configuration conf) throws IOException {
817    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
818        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
819    if (addressList.isEmpty()) {
820      throw new IOException("Incorrect configuration: secondary namenode address "
821          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
822    }
823    return addressList;
824  }
825
826  /**
827   * Returns list of InetSocketAddresses corresponding to namenodes from the
828   * configuration.
829   * 
830   * Returns namenode address specifically configured for datanodes (using
831   * service ports), if found. If not, regular RPC address configured for other
832   * clients is returned.
833   * 
834   * @param conf configuration
835   * @return list of InetSocketAddress
836   * @throws IOException on error
837   */
838  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
839      Configuration conf) throws IOException {
840    // Use default address as fall back
841    String defaultAddress;
842    try {
843      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
844    } catch (IllegalArgumentException e) {
845      defaultAddress = null;
846    }
847    
848    Map<String, Map<String, InetSocketAddress>> addressList =
849      getAddresses(conf, defaultAddress,
850        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
851    if (addressList.isEmpty()) {
852      throw new IOException("Incorrect configuration: namenode address "
853          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
854          + DFS_NAMENODE_RPC_ADDRESS_KEY
855          + " is not configured.");
856    }
857    return addressList;
858  }
859
860  /**
861   * Returns list of InetSocketAddresses corresponding to the namenode
862   * that manages this cluster. Note this is to be used by datanodes to get
863   * the list of namenode addresses to talk to.
864   *
865   * Returns namenode address specifically configured for datanodes (using
866   * service ports), if found. If not, regular RPC address configured for other
867   * clients is returned.
868   *
869   * @param conf configuration
870   * @return list of InetSocketAddress
871   * @throws IOException on error
872   */
873  public static Map<String, Map<String, InetSocketAddress>>
874    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
875    // Use default address as fall back
876    String defaultAddress;
877    try {
878      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
879    } catch (IllegalArgumentException e) {
880      defaultAddress = null;
881    }
882
883    Collection<String> parentNameServices = conf.getTrimmedStringCollection
884            (DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
885
886    if (parentNameServices.isEmpty()) {
887      parentNameServices = conf.getTrimmedStringCollection
888              (DFSConfigKeys.DFS_NAMESERVICES);
889    } else {
890      // Ensure that the internal service is ineed in the list of all available
891      // nameservices.
892      Set<String> availableNameServices = Sets.newHashSet(conf
893              .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
894      for (String nsId : parentNameServices) {
895        if (!availableNameServices.contains(nsId)) {
896          throw new IOException("Unknown nameservice: " + nsId);
897        }
898      }
899    }
900
901    Map<String, Map<String, InetSocketAddress>> addressList =
902            getAddressesForNsIds(conf, parentNameServices, defaultAddress,
903                    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
904    if (addressList.isEmpty()) {
905      throw new IOException("Incorrect configuration: namenode address "
906              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
907              + DFS_NAMENODE_RPC_ADDRESS_KEY
908              + " is not configured.");
909    }
910    return addressList;
911  }
912
913  /**
914   * Flatten the given map, as returned by other functions in this class,
915   * into a flat list of {@link ConfiguredNNAddress} instances.
916   */
917  public static List<ConfiguredNNAddress> flattenAddressMap(
918      Map<String, Map<String, InetSocketAddress>> map) {
919    List<ConfiguredNNAddress> ret = Lists.newArrayList();
920    
921    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
922      map.entrySet()) {
923      String nsId = entry.getKey();
924      Map<String, InetSocketAddress> nnMap = entry.getValue();
925      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
926        String nnId = e2.getKey();
927        InetSocketAddress addr = e2.getValue();
928        
929        ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
930      }
931    }
932    return ret;
933  }
934
935  /**
936   * Format the given map, as returned by other functions in this class,
937   * into a string suitable for debugging display. The format of this string
938   * should not be considered an interface, and is liable to change.
939   */
940  public static String addressMapToString(
941      Map<String, Map<String, InetSocketAddress>> map) {
942    StringBuilder b = new StringBuilder();
943    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
944         map.entrySet()) {
945      String nsId = entry.getKey();
946      Map<String, InetSocketAddress> nnMap = entry.getValue();
947      b.append("Nameservice <").append(nsId).append(">:").append("\n");
948      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
949        b.append("  NN ID ").append(e2.getKey())
950          .append(" => ").append(e2.getValue()).append("\n");
951      }
952    }
953    return b.toString();
954  }
955  
956  public static String nnAddressesAsString(Configuration conf) {
957    Map<String, Map<String, InetSocketAddress>> addresses =
958      getHaNnRpcAddresses(conf);
959    return addressMapToString(addresses);
960  }
961
962  /**
963   * Represent one of the NameNodes configured in the cluster.
964   */
965  public static class ConfiguredNNAddress {
966    private final String nameserviceId;
967    private final String namenodeId;
968    private final InetSocketAddress addr;
969
970    private ConfiguredNNAddress(String nameserviceId, String namenodeId,
971        InetSocketAddress addr) {
972      this.nameserviceId = nameserviceId;
973      this.namenodeId = namenodeId;
974      this.addr = addr;
975    }
976
977    public String getNameserviceId() {
978      return nameserviceId;
979    }
980
981    public String getNamenodeId() {
982      return namenodeId;
983    }
984
985    public InetSocketAddress getAddress() {
986      return addr;
987    }
988    
989    @Override
990    public String toString() {
991      return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
992        "nnId=" + namenodeId + ";addr=" + addr + "]";
993    }
994  }
995
996  /** @return Internal name services specified in the conf. */
997  static Collection<String> getInternalNameServices(Configuration conf) {
998    final Collection<String> ids = conf.getTrimmedStringCollection(
999        DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
1000    return !ids.isEmpty()? ids: getNameServiceIds(conf);
1001  }
1002
1003  /**
1004   * Get a URI for each internal nameservice. If a nameservice is
1005   * HA-enabled, and the configured failover proxy provider supports logical
1006   * URIs, then the logical URI of the nameservice is returned.
1007   * Otherwise, a URI corresponding to an RPC address of the single NN for that
1008   * nameservice is returned, preferring the service RPC address over the
1009   * client RPC address.
1010   * 
1011   * @param conf configuration
1012   * @return a collection of all configured NN URIs, preferring service
1013   *         addresses
1014   */
1015  public static Collection<URI> getInternalNsRpcUris(Configuration conf) {
1016    return getNameServiceUris(conf, getInternalNameServices(conf),
1017        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1018        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
1019  }
1020
1021  /**
1022   * Get a URI for each configured nameservice. If a nameservice is
1023   * HA-enabled, and the configured failover proxy provider supports logical
1024   * URIs, then the logical URI of the nameservice is returned.
1025   * Otherwise, a URI corresponding to the address of the single NN for that
1026   * nameservice is returned.
1027   * 
1028   * @param conf configuration
1029   * @param keys configuration keys to try in order to get the URI for non-HA
1030   *        nameservices
1031   * @return a collection of all configured NN URIs
1032   */
1033  static Collection<URI> getNameServiceUris(Configuration conf,
1034      Collection<String> nameServices, String... keys) {
1035    Set<URI> ret = new HashSet<URI>();
1036    
1037    // We're passed multiple possible configuration keys for any given NN or HA
1038    // nameservice, and search the config in order of these keys. In order to
1039    // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
1040    // URI for a config key for which we've already found a preferred entry, we
1041    // keep track of non-preferred keys here.
1042    Set<URI> nonPreferredUris = new HashSet<URI>();
1043    
1044    for (String nsId : nameServices) {
1045      URI nsUri;
1046      try {
1047        nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
1048      } catch (URISyntaxException ue) {
1049        throw new IllegalArgumentException(ue);
1050      }
1051      /**
1052       * Determine whether the logical URI of the name service can be resolved
1053       * by the configured failover proxy provider. If not, we should try to
1054       * resolve the URI here
1055       */
1056      boolean useLogicalUri = false;
1057      try {
1058        useLogicalUri = HAUtil.useLogicalUri(conf, nsUri);
1059      } catch (IOException e){
1060        LOG.warn("Getting exception  while trying to determine if nameservice "
1061            + nsId + " can use logical URI: " + e);
1062      }
1063      if (HAUtil.isHAEnabled(conf, nsId) && useLogicalUri) {
1064        // Add the logical URI of the nameservice.
1065        ret.add(nsUri);
1066      } else {
1067        // Add the URI corresponding to the address of the NN.
1068        boolean uriFound = false;
1069        for (String key : keys) {
1070          String addr = conf.get(concatSuffixes(key, nsId));
1071          if (addr != null) {
1072            URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
1073                NetUtils.createSocketAddr(addr));
1074            if (!uriFound) {
1075              uriFound = true;
1076              ret.add(uri);
1077            } else {
1078              nonPreferredUris.add(uri);
1079            }
1080          }
1081        }
1082      }
1083    }
1084    
1085    // Add the generic configuration keys.
1086    boolean uriFound = false;
1087    for (String key : keys) {
1088      String addr = conf.get(key);
1089      if (addr != null) {
1090        URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
1091        if (!uriFound) {
1092          uriFound = true;
1093          ret.add(uri);
1094        } else {
1095          nonPreferredUris.add(uri);
1096        }
1097      }
1098    }
1099    
1100    // Add the default URI if it is an HDFS URI.
1101    URI defaultUri = FileSystem.getDefaultUri(conf);
1102    // checks if defaultUri is ip:port format
1103    // and convert it to hostname:port format
1104    if (defaultUri != null && (defaultUri.getPort() != -1)) {
1105      defaultUri = createUri(defaultUri.getScheme(),
1106          NetUtils.createSocketAddr(defaultUri.getHost(), 
1107              defaultUri.getPort()));
1108    }
1109    if (defaultUri != null &&
1110        HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
1111        !nonPreferredUris.contains(defaultUri)) {
1112      ret.add(defaultUri);
1113    }
1114    
1115    return ret;
1116  }
1117
1118  /**
1119   * Given the InetSocketAddress this method returns the nameservice Id
1120   * corresponding to the key with matching address, by doing a reverse 
1121   * lookup on the list of nameservices until it finds a match.
1122   * 
1123   * Since the process of resolving URIs to Addresses is slightly expensive,
1124   * this utility method should not be used in performance-critical routines.
1125   * 
1126   * @param conf - configuration
1127   * @param address - InetSocketAddress for configured communication with NN.
1128   *     Configured addresses are typically given as URIs, but we may have to
1129   *     compare against a URI typed in by a human, or the server name may be
1130   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
1131   *     comparing URI substrings.
1132   * @param keys - list of configured communication parameters that should
1133   *     be checked for matches.  For example, to compare against RPC addresses,
1134   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1135   *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
1136   *     not the NameServiceId-suffixed keys.
1137   * @return nameserviceId, or null if no match found
1138   */
1139  public static String getNameServiceIdFromAddress(final Configuration conf, 
1140      final InetSocketAddress address, String... keys) {
1141    // Configuration with a single namenode and no nameserviceId
1142    String[] ids = getSuffixIDs(conf, address, keys);
1143    return (ids != null) ? ids[0] : null;
1144  }
1145  
1146  /**
1147   * return server http or https address from the configuration for a
1148   * given namenode rpc address.
1149   * @param namenodeAddr - namenode RPC address
1150   * @param conf configuration
1151   * @param scheme - the scheme (http / https)
1152   * @return server http or https address
1153   * @throws IOException 
1154   */
1155  public static URI getInfoServer(InetSocketAddress namenodeAddr,
1156      Configuration conf, String scheme) throws IOException {
1157    String[] suffixes = null;
1158    if (namenodeAddr != null) {
1159      // if non-default namenode, try reverse look up 
1160      // the nameServiceID if it is available
1161      suffixes = getSuffixIDs(conf, namenodeAddr,
1162          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1163          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
1164    }
1165
1166    String authority;
1167    if ("http".equals(scheme)) {
1168      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
1169          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
1170    } else if ("https".equals(scheme)) {
1171      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
1172          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
1173    } else {
1174      throw new IllegalArgumentException("Invalid scheme:" + scheme);
1175    }
1176
1177    if (namenodeAddr != null) {
1178      authority = substituteForWildcardAddress(authority,
1179          namenodeAddr.getHostName());
1180    }
1181    return URI.create(scheme + "://" + authority);
1182  }
1183
1184  /**
1185   * Lookup the HTTP / HTTPS address of the namenode, and replace its hostname
1186   * with defaultHost when it found out that the address is a wildcard / local
1187   * address.
1188   *
1189   * @param defaultHost
1190   *          The default host name of the namenode.
1191   * @param conf
1192   *          The configuration
1193   * @param scheme
1194   *          HTTP or HTTPS
1195   * @throws IOException
1196   */
1197  public static URI getInfoServerWithDefaultHost(String defaultHost,
1198      Configuration conf, final String scheme) throws IOException {
1199    URI configuredAddr = getInfoServer(null, conf, scheme);
1200    String authority = substituteForWildcardAddress(
1201        configuredAddr.getAuthority(), defaultHost);
1202    return URI.create(scheme + "://" + authority);
1203  }
1204
1205  /**
1206   * Determine whether HTTP or HTTPS should be used to connect to the remote
1207   * server. Currently the client only connects to the server via HTTPS if the
1208   * policy is set to HTTPS_ONLY.
1209   *
1210   * @return the scheme (HTTP / HTTPS)
1211   */
1212  public static String getHttpClientScheme(Configuration conf) {
1213    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
1214    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
1215  }
1216
1217  /**
1218   * Substitute a default host in the case that an address has been configured
1219   * with a wildcard. This is used, for example, when determining the HTTP
1220   * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
1221   * substitute the hostname from the filesystem URI rather than trying to
1222   * connect to 0.0.0.0.
1223   * @param configuredAddress the address found in the configuration
1224   * @param defaultHost the host to substitute with, if configuredAddress
1225   * is a local/wildcard address.
1226   * @return the substituted address
1227   * @throws IOException if it is a wildcard address and security is enabled
1228   */
1229  @VisibleForTesting
1230  static String substituteForWildcardAddress(String configuredAddress,
1231    String defaultHost) throws IOException {
1232    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
1233    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
1234        + ":0");
1235    final InetAddress addr = sockAddr.getAddress();
1236    if (addr != null && addr.isAnyLocalAddress()) {
1237      if (UserGroupInformation.isSecurityEnabled() &&
1238          defaultSockAddr.getAddress().isAnyLocalAddress()) {
1239        throw new IOException("Cannot use a wildcard address with security. " +
1240            "Must explicitly set bind address for Kerberos");
1241      }
1242      return defaultHost + ":" + sockAddr.getPort();
1243    } else {
1244      return configuredAddress;
1245    }
1246  }
1247  
1248  private static String getSuffixedConf(Configuration conf,
1249      String key, String defaultVal, String[] suffixes) {
1250    String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
1251    if (ret != null) {
1252      return ret;
1253    }
1254    return conf.get(key, defaultVal);
1255  }
1256  
1257  /**
1258   * Sets the node specific setting into generic configuration key. Looks up
1259   * value of "key.nameserviceId.namenodeId" and if found sets that value into 
1260   * generic key in the conf. If this is not found, falls back to
1261   * "key.nameserviceId" and then the unmodified key.
1262   *
1263   * Note that this only modifies the runtime conf.
1264   * 
1265   * @param conf
1266   *          Configuration object to lookup specific key and to set the value
1267   *          to the key passed. Note the conf object is modified.
1268   * @param nameserviceId
1269   *          nameservice Id to construct the node specific key. Pass null if
1270   *          federation is not configuration.
1271   * @param nnId
1272   *          namenode Id to construct the node specific key. Pass null if
1273   *          HA is not configured.
1274   * @param keys
1275   *          The key for which node specific value is looked up
1276   */
1277  public static void setGenericConf(Configuration conf,
1278      String nameserviceId, String nnId, String... keys) {
1279    for (String key : keys) {
1280      String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
1281      if (value != null) {
1282        conf.set(key, value);
1283        continue;
1284      }
1285      value = conf.get(addKeySuffixes(key, nameserviceId));
1286      if (value != null) {
1287        conf.set(key, value);
1288      }
1289    }
1290  }
1291  
1292  /** Return used as percentage of capacity */
1293  public static float getPercentUsed(long used, long capacity) {
1294    return capacity <= 0 ? 100 : (used * 100.0f)/capacity; 
1295  }
1296  
1297  /** Return remaining as percentage of capacity */
1298  public static float getPercentRemaining(long remaining, long capacity) {
1299    return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; 
1300  }
1301
1302  /** Convert percentage to a string. */
1303  public static String percent2String(double percentage) {
1304    return StringUtils.format("%.2f%%", percentage);
1305  }
1306
1307  /**
1308   * Round bytes to GiB (gibibyte)
1309   * @param bytes number of bytes
1310   * @return number of GiB
1311   */
1312  public static int roundBytesToGB(long bytes) {
1313    return Math.round((float)bytes/ 1024 / 1024 / 1024);
1314  }
1315  
1316  /** Create a {@link ClientDatanodeProtocol} proxy */
1317  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1318      DatanodeID datanodeid, Configuration conf, int socketTimeout,
1319      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
1320    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
1321        connectToDnViaHostname, locatedBlock);
1322  }
1323  
1324  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
1325  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1326      DatanodeID datanodeid, Configuration conf, int socketTimeout,
1327      boolean connectToDnViaHostname) throws IOException {
1328    return new ClientDatanodeProtocolTranslatorPB(
1329        datanodeid, conf, socketTimeout, connectToDnViaHostname);
1330  }
1331  
1332  /** Create a {@link ClientDatanodeProtocol} proxy */
1333  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1334      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
1335      SocketFactory factory) throws IOException {
1336    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
1337  }
1338
1339  /**
1340   * Get nameservice Id for the {@link NameNode} based on namenode RPC address
1341   * matching the local node address.
1342   */
1343  public static String getNamenodeNameServiceId(Configuration conf) {
1344    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
1345  }
1346  
1347  /**
1348   * Get nameservice Id for the BackupNode based on backup node RPC address
1349   * matching the local node address.
1350   */
1351  public static String getBackupNameServiceId(Configuration conf) {
1352    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
1353  }
1354  
1355  /**
1356   * Get nameservice Id for the secondary node based on secondary http address
1357   * matching the local node address.
1358   */
1359  public static String getSecondaryNameServiceId(Configuration conf) {
1360    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
1361  }
1362  
1363  /**
1364   * Get the nameservice Id by matching the {@code addressKey} with the
1365   * the address of the local node. 
1366   * 
1367   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
1368   * configured, and more than one nameservice Id is configured, this method 
1369   * determines the nameservice Id by matching the local node's address with the
1370   * configured addresses. When a match is found, it returns the nameservice Id
1371   * from the corresponding configuration key.
1372   * 
1373   * @param conf Configuration
1374   * @param addressKey configuration key to get the address.
1375   * @return nameservice Id on success, null if federation is not configured.
1376   * @throws HadoopIllegalArgumentException on error
1377   */
1378  private static String getNameServiceId(Configuration conf, String addressKey) {
1379    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
1380    if (nameserviceId != null) {
1381      return nameserviceId;
1382    }
1383    Collection<String> nsIds = getNameServiceIds(conf);
1384    if (1 == nsIds.size()) {
1385      return nsIds.toArray(new String[1])[0];
1386    }
1387    String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);
1388    
1389    return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
1390  }
1391  
1392  /**
1393   * Returns nameservice Id and namenode Id when the local host matches the
1394   * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
1395   * 
1396   * @param conf Configuration
1397   * @param addressKey configuration key corresponding to the address.
1398   * @param knownNsId only look at configs for the given nameservice, if not-null
1399   * @param knownNNId only look at configs for the given namenode, if not null
1400   * @param matcher matching criteria for matching the address
1401   * @return Array with nameservice Id and namenode Id on success. First element
1402   *         in the array is nameservice Id and second element is namenode Id.
1403   *         Null value indicates that the configuration does not have the the
1404   *         Id.
1405   * @throws HadoopIllegalArgumentException on error
1406   */
1407  static String[] getSuffixIDs(final Configuration conf, final String addressKey,
1408      String knownNsId, String knownNNId,
1409      final AddressMatcher matcher) {
1410    String nameserviceId = null;
1411    String namenodeId = null;
1412    int found = 0;
1413    
1414    Collection<String> nsIds = getNameServiceIds(conf);
1415    for (String nsId : emptyAsSingletonNull(nsIds)) {
1416      if (knownNsId != null && !knownNsId.equals(nsId)) {
1417        continue;
1418      }
1419      
1420      Collection<String> nnIds = getNameNodeIds(conf, nsId);
1421      for (String nnId : emptyAsSingletonNull(nnIds)) {
1422        if (LOG.isTraceEnabled()) {
1423          LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
1424              addressKey, nsId, nnId));
1425        }
1426        if (knownNNId != null && !knownNNId.equals(nnId)) {
1427          continue;
1428        }
1429        String key = addKeySuffixes(addressKey, nsId, nnId);
1430        String addr = conf.get(key);
1431        if (addr == null) {
1432          continue;
1433        }
1434        InetSocketAddress s = null;
1435        try {
1436          s = NetUtils.createSocketAddr(addr);
1437        } catch (Exception e) {
1438          LOG.warn("Exception in creating socket address " + addr, e);
1439          continue;
1440        }
1441        if (!s.isUnresolved() && matcher.match(s)) {
1442          nameserviceId = nsId;
1443          namenodeId = nnId;
1444          found++;
1445        }
1446      }
1447    }
1448    if (found > 1) { // Only one address must match the local address
1449      String msg = "Configuration has multiple addresses that match "
1450          + "local node's address. Please configure the system with "
1451          + DFS_NAMESERVICE_ID + " and "
1452          + DFS_HA_NAMENODE_ID_KEY;
1453      throw new HadoopIllegalArgumentException(msg);
1454    }
1455    return new String[] { nameserviceId, namenodeId };
1456  }
1457  
1458  /**
1459   * For given set of {@code keys} adds nameservice Id and or namenode Id
1460   * and returns {nameserviceId, namenodeId} when address match is found.
1461   * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
1462   */
1463  static String[] getSuffixIDs(final Configuration conf,
1464      final InetSocketAddress address, final String... keys) {
1465    AddressMatcher matcher = new AddressMatcher() {
1466     @Override
1467      public boolean match(InetSocketAddress s) {
1468        return address.equals(s);
1469      } 
1470    };
1471    
1472    for (String key : keys) {
1473      String[] ids = getSuffixIDs(conf, key, null, null, matcher);
1474      if (ids != null && (ids [0] != null || ids[1] != null)) {
1475        return ids;
1476      }
1477    }
1478    return null;
1479  }
1480  
1481  private interface AddressMatcher {
1482    public boolean match(InetSocketAddress s);
1483  }
1484
1485  /** Create a URI from the scheme and address */
1486  public static URI createUri(String scheme, InetSocketAddress address) {
1487    try {
1488      return new URI(scheme, null, address.getHostName(), address.getPort(),
1489          null, null, null);
1490    } catch (URISyntaxException ue) {
1491      throw new IllegalArgumentException(ue);
1492    }
1493  }
1494  
1495  /**
1496   * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
1497   * @param conf configuration
1498   * @param protocol Protocol interface
1499   * @param service service that implements the protocol
1500   * @param server RPC server to which the protocol & implementation is added to
1501   * @throws IOException
1502   */
1503  public static void addPBProtocol(Configuration conf, Class<?> protocol,
1504      BlockingService service, RPC.Server server) throws IOException {
1505    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
1506    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
1507  }
1508
1509  /**
1510   * Map a logical namenode ID to its service address. Use the given
1511   * nameservice if specified, or the configured one if none is given.
1512   *
1513   * @param conf Configuration
1514   * @param nsId which nameservice nnId is a part of, optional
1515   * @param nnId the namenode ID to get the service addr for
1516   * @return the service addr, null if it could not be determined
1517   */
1518  public static String getNamenodeServiceAddr(final Configuration conf,
1519      String nsId, String nnId) {
1520
1521    if (nsId == null) {
1522      nsId = getOnlyNameServiceIdOrNull(conf);
1523    }
1524
1525    String serviceAddrKey = concatSuffixes(
1526        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
1527
1528    String addrKey = concatSuffixes(
1529        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
1530
1531    String serviceRpcAddr = conf.get(serviceAddrKey);
1532    if (serviceRpcAddr == null) {
1533      serviceRpcAddr = conf.get(addrKey);
1534    }
1535    return serviceRpcAddr;
1536  }
1537
1538  /**
1539   * If the configuration refers to only a single nameservice, return the
1540   * name of that nameservice. If it refers to 0 or more than 1, return null.
1541   */
1542  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
1543    Collection<String> nsIds = getNameServiceIds(conf);
1544    if (1 == nsIds.size()) {
1545      return nsIds.toArray(new String[1])[0];
1546    } else {
1547      // No nameservice ID was given and more than one is configured
1548      return null;
1549    }
1550  }
1551  
  /** Command-line options recognized by {@link #parseHelpArgument}. */
  public static final Options helpOptions = new Options();
  /** The -h / --help option. */
  public static final Option helpOpt = new Option("h", "help", false,
      "get help information");

  static {
    // Register the help option once, at class load time.
    helpOptions.addOption(helpOpt);
  }
1559
1560  /**
1561   * Parse the arguments for commands
1562   * 
1563   * @param args the argument to be parsed
1564   * @param helpDescription help information to be printed out
1565   * @param out Printer
1566   * @param printGenericCommandUsage whether to print the 
1567   *              generic command usage defined in ToolRunner
1568   * @return true when the argument matches help option, false if not
1569   */
1570  public static boolean parseHelpArgument(String[] args,
1571      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
1572    if (args.length == 1) {
1573      try {
1574        CommandLineParser parser = new PosixParser();
1575        CommandLine cmdLine = parser.parse(helpOptions, args);
1576        if (cmdLine.hasOption(helpOpt.getOpt())
1577            || cmdLine.hasOption(helpOpt.getLongOpt())) {
1578          // should print out the help information
1579          out.println(helpDescription + "\n");
1580          if (printGenericCommandUsage) {
1581            ToolRunner.printGenericCommandUsage(out);
1582          }
1583          return true;
1584        }
1585      } catch (ParseException pe) {
1586        return false;
1587      }
1588    }
1589    return false;
1590  }
1591  
1592  /**
1593   * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
1594   * 
1595   * @param conf Configuration
1596   * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
1597   */
1598  public static float getInvalidateWorkPctPerIteration(Configuration conf) {
1599    float blocksInvalidateWorkPct = conf.getFloat(
1600        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
1601        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
1602    Preconditions.checkArgument(
1603        (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
1604        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
1605        " = '" + blocksInvalidateWorkPct + "' is invalid. " +
1606        "It should be a positive, non-zero float value, not greater than 1.0f, " +
1607        "to indicate a percentage.");
1608    return blocksInvalidateWorkPct;
1609  }
1610
1611  /**
1612   * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
1613   * configuration.
1614   * 
1615   * @param conf Configuration
1616   * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
1617   */
1618  public static int getReplWorkMultiplier(Configuration conf) {
1619    int blocksReplWorkMultiplier = conf.getInt(
1620            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
1621            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
1622    Preconditions.checkArgument(
1623        (blocksReplWorkMultiplier > 0),
1624        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
1625        " = '" + blocksReplWorkMultiplier + "' is invalid. " +
1626        "It should be a positive, non-zero integer value.");
1627    return blocksReplWorkMultiplier;
1628  }
1629  
1630  /**
1631   * Get SPNEGO keytab Key from configuration
1632   * 
1633   * @param conf Configuration
1634   * @param defaultKey default key to be used for config lookup
1635   * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty
1636   *         else return defaultKey
1637   */
1638  public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
1639    String value = 
1640        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
1641    return (value == null || value.isEmpty()) ?
1642        defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
1643  }
1644
1645  /**
1646   * Get http policy. Http Policy is chosen as follows:
1647   * <ol>
1648   * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
1649   * https endpoints are started on configured https ports</li>
1650   * <li>This configuration is overridden by dfs.https.enable configuration, if
1651   * it is set to true. In that case, both http and https endpoints are stared.</li>
1652   * <li>All the above configurations are overridden by dfs.http.policy
1653   * configuration. With this configuration you can set http-only, https-only
1654   * and http-and-https endpoints.</li>
1655   * </ol>
1656   * See hdfs-default.xml documentation for more details on each of the above
1657   * configuration settings.
1658   */
1659  public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
1660    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1661    if (policyStr == null) {
1662      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
1663          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
1664
1665      boolean hadoopSsl = conf.getBoolean(
1666          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
1667          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
1668
1669      if (hadoopSsl) {
1670        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
1671            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1672            + ".");
1673      }
1674      if (https) {
1675        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
1676            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1677            + ".");
1678      }
1679
1680      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
1681          : HttpConfig.Policy.HTTP_ONLY;
1682    }
1683
1684    HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
1685    if (policy == null) {
1686      throw new HadoopIllegalArgumentException("Unregonized value '"
1687          + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1688    }
1689
1690    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
1691    return policy;
1692  }
1693
1694  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
1695      Configuration sslConf) {
1696    return builder
1697        .needsClientAuth(
1698            sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1699                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
1700        .keyPassword(getPassword(sslConf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY))
1701        .keyStore(sslConf.get("ssl.server.keystore.location"),
1702            getPassword(sslConf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY),
1703            sslConf.get("ssl.server.keystore.type", "jks"))
1704        .trustStore(sslConf.get("ssl.server.truststore.location"),
1705            getPassword(sslConf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY),
1706            sslConf.get("ssl.server.truststore.type", "jks"))
1707        .excludeCiphers(
1708            sslConf.get("ssl.server.exclude.cipher.list"));
1709  }
1710
1711  /**
1712   * Load HTTPS-related configuration.
1713   */
1714  public static Configuration loadSslConfiguration(Configuration conf) {
1715    Configuration sslConf = new Configuration(false);
1716
1717    sslConf.addResource(conf.get(
1718        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
1719        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
1720
1721    boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1722        DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
1723    sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
1724    return sslConf;
1725  }
1726
1727  /**
1728   * Return a HttpServer.Builder that the journalnode / namenode / secondary
1729   * namenode can use to initialize their HTTP / HTTPS server.
1730   *
1731   */
1732  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
1733      Configuration conf, final InetSocketAddress httpAddr,
1734      final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
1735      String spnegoKeytabFileKey) throws IOException {
1736    HttpConfig.Policy policy = getHttpPolicy(conf);
1737
1738    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
1739        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
1740        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
1741        .setUsernameConfKey(spnegoUserNameKey)
1742        .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
1743
1744    // initialize the webserver for uploading/downloading files.
1745    if (UserGroupInformation.isSecurityEnabled()) {
1746      LOG.info("Starting web server as: "
1747          + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
1748              httpAddr.getHostName()));
1749    }
1750
1751    if (policy.isHttpEnabled()) {
1752      if (httpAddr.getPort() == 0) {
1753        builder.setFindPort(true);
1754      }
1755
1756      URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
1757      builder.addEndpoint(uri);
1758      LOG.info("Starting Web-server for " + name + " at: " + uri);
1759    }
1760
1761    if (policy.isHttpsEnabled() && httpsAddr != null) {
1762      Configuration sslConf = loadSslConfiguration(conf);
1763      loadSslConfToHttpServerBuilder(builder, sslConf);
1764
1765      if (httpsAddr.getPort() == 0) {
1766        builder.setFindPort(true);
1767      }
1768
1769      URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
1770      builder.addEndpoint(uri);
1771      LOG.info("Starting Web-server for " + name + " at: " + uri);
1772    }
1773    return builder;
1774  }
1775
1776  /**
1777   * Leverages the Configuration.getPassword method to attempt to get
1778   * passwords from the CredentialProvider API before falling back to
1779   * clear text in config - if falling back is allowed.
1780   * @param conf Configuration instance
1781   * @param alias name of the credential to retreive
1782   * @return String credential value or null
1783   */
1784  static String getPassword(Configuration conf, String alias) {
1785    String password = null;
1786    try {
1787      char[] passchars = conf.getPassword(alias);
1788      if (passchars != null) {
1789        password = new String(passchars);
1790      }
1791    }
1792    catch (IOException ioe) {
1793      password = null;
1794    }
1795    return password;
1796  }
1797
1798  /**
1799   * Converts a Date into an ISO-8601 formatted datetime string.
1800   */
1801  public static String dateToIso8601String(Date date) {
1802    SimpleDateFormat df =
1803        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
1804    return df.format(date);
1805  }
1806
1807  /**
1808   * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
1809   */
1810  public static String durationToString(long durationMs) {
1811    boolean negative = false;
1812    if (durationMs < 0) {
1813      negative = true;
1814      durationMs = -durationMs;
1815    }
1816    // Chop off the milliseconds
1817    long durationSec = durationMs / 1000;
1818    final int secondsPerMinute = 60;
1819    final int secondsPerHour = 60*60;
1820    final int secondsPerDay = 60*60*24;
1821    final long days = durationSec / secondsPerDay;
1822    durationSec -= days * secondsPerDay;
1823    final long hours = durationSec / secondsPerHour;
1824    durationSec -= hours * secondsPerHour;
1825    final long minutes = durationSec / secondsPerMinute;
1826    durationSec -= minutes * secondsPerMinute;
1827    final long seconds = durationSec;
1828    final long milliseconds = durationMs % 1000;
1829    String format = "%03d:%02d:%02d:%02d.%03d";
1830    if (negative)  {
1831      format = "-" + format;
1832    }
1833    return String.format(format, days, hours, minutes, seconds, milliseconds);
1834  }
1835
1836  /**
1837   * Converts a relative time string into a duration in milliseconds.
1838   */
1839  public static long parseRelativeTime(String relTime) throws IOException {
1840    if (relTime.length() < 2) {
1841      throw new IOException("Unable to parse relative time value of " + relTime
1842          + ": too short");
1843    }
1844    String ttlString = relTime.substring(0, relTime.length()-1);
1845    long ttl;
1846    try {
1847      ttl = Long.parseLong(ttlString);
1848    } catch (NumberFormatException e) {
1849      throw new IOException("Unable to parse relative time value of " + relTime
1850          + ": " + ttlString + " is not a number");
1851    }
1852    if (relTime.endsWith("s")) {
1853      // pass
1854    } else if (relTime.endsWith("m")) {
1855      ttl *= 60;
1856    } else if (relTime.endsWith("h")) {
1857      ttl *= 60*60;
1858    } else if (relTime.endsWith("d")) {
1859      ttl *= 60*60*24;
1860    } else {
1861      throw new IOException("Unable to parse relative time value of " + relTime
1862          + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
1863    }
1864    return ttl*1000;
1865  }
1866
1867  /**
1868   * Assert that all objects in the collection are equal. Returns silently if
1869   * so, throws an AssertionError if any object is not equal. All null values
1870   * are considered equal.
1871   * 
1872   * @param objects the collection of objects to check for equality.
1873   */
1874  public static void assertAllResultsEqual(Collection<?> objects)
1875      throws AssertionError {
1876    if (objects.size() == 0 || objects.size() == 1)
1877      return;
1878    
1879    Object[] resultsArray = objects.toArray();
1880    for (int i = 1; i < resultsArray.length; i++) {
1881      Object currElement = resultsArray[i];
1882      Object lastElement = resultsArray[i - 1];
1883      if ((currElement == null && currElement != lastElement) ||
1884          (currElement != null && !currElement.equals(lastElement))) {
1885        throw new AssertionError("Not all elements match in results: " +
1886          Arrays.toString(resultsArray));
1887      }
1888    }
1889  }
1890
1891  /**
1892   * Creates a new KeyProvider from the given Configuration.
1893   *
1894   * @param conf Configuration
1895   * @return new KeyProvider, or null if no provider was found.
1896   * @throws IOException if the KeyProvider is improperly specified in
1897   *                             the Configuration
1898   */
1899  public static KeyProvider createKeyProvider(
1900      final Configuration conf) throws IOException {
1901    final String providerUriStr =
1902        conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
1903    // No provider set in conf
1904    if (providerUriStr.isEmpty()) {
1905      return null;
1906    }
1907    final URI providerUri;
1908    try {
1909      providerUri = new URI(providerUriStr);
1910    } catch (URISyntaxException e) {
1911      throw new IOException(e);
1912    }
1913    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
1914    if (keyProvider == null) {
1915      throw new IOException("Could not instantiate KeyProvider from " + 
1916          DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" + 
1917          providerUriStr +"'");
1918    }
1919    if (keyProvider.isTransient()) {
1920      throw new IOException("KeyProvider " + keyProvider.toString()
1921          + " was found but it is a transient provider.");
1922    }
1923    return keyProvider;
1924  }
1925
1926  /**
1927   * Creates a new KeyProviderCryptoExtension by wrapping the
1928   * KeyProvider specified in the given Configuration.
1929   *
1930   * @param conf Configuration
1931   * @return new KeyProviderCryptoExtension, or null if no provider was found.
1932   * @throws IOException if the KeyProvider is improperly specified in
1933   *                             the Configuration
1934   */
1935  public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
1936      final Configuration conf) throws IOException {
1937    KeyProvider keyProvider = createKeyProvider(conf);
1938    if (keyProvider == null) {
1939      return null;
1940    }
1941    KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
1942        .createKeyProviderCryptoExtension(keyProvider);
1943    return cryptoProvider;
1944  }
1945
1946  /**
1947   * Probe for HDFS Encryption being enabled; this uses the value of
1948   * the option {@link DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI},
1949   * returning true if that property contains a non-empty, non-whitespace
1950   * string.
1951   * @param conf configuration to probe
1952   * @return true if encryption is considered enabled.
1953   */
1954  public static boolean isHDFSEncryptionEnabled(Configuration conf) {
1955    return !conf.getTrimmed(
1956        DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "").isEmpty();
1957  }
1958
1959}