/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

/**
 * A Datanode has one or more storages. A storage in the Datanode is represented
 * by this class.
035 */ 036public class DatanodeStorageInfo { 037 public static final DatanodeStorageInfo[] EMPTY_ARRAY = {}; 038 039 public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) { 040 return toDatanodeInfos(Arrays.asList(storages)); 041 } 042 static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) { 043 final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()]; 044 for(int i = 0; i < storages.size(); i++) { 045 datanodes[i] = storages.get(i).getDatanodeDescriptor(); 046 } 047 return datanodes; 048 } 049 050 static DatanodeDescriptor[] toDatanodeDescriptors( 051 DatanodeStorageInfo[] storages) { 052 DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length]; 053 for (int i = 0; i < storages.length; ++i) { 054 datanodes[i] = storages[i].getDatanodeDescriptor(); 055 } 056 return datanodes; 057 } 058 059 public static String[] toStorageIDs(DatanodeStorageInfo[] storages) { 060 String[] storageIDs = new String[storages.length]; 061 for(int i = 0; i < storageIDs.length; i++) { 062 storageIDs[i] = storages[i].getStorageID(); 063 } 064 return storageIDs; 065 } 066 067 public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) { 068 StorageType[] storageTypes = new StorageType[storages.length]; 069 for(int i = 0; i < storageTypes.length; i++) { 070 storageTypes[i] = storages[i].getStorageType(); 071 } 072 return storageTypes; 073 } 074 075 public void updateFromStorage(DatanodeStorage storage) { 076 state = storage.getState(); 077 storageType = storage.getStorageType(); 078 } 079 080 /** 081 * Iterates over the list of blocks belonging to the data-node. 
082 */ 083 class BlockIterator implements Iterator<BlockInfoContiguous> { 084 private BlockInfoContiguous current; 085 086 BlockIterator(BlockInfoContiguous head) { 087 this.current = head; 088 } 089 090 public boolean hasNext() { 091 return current != null; 092 } 093 094 public BlockInfoContiguous next() { 095 BlockInfoContiguous res = current; 096 current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this)); 097 return res; 098 } 099 100 public void remove() { 101 throw new UnsupportedOperationException("Sorry. can't remove."); 102 } 103 } 104 105 private final DatanodeDescriptor dn; 106 private final String storageID; 107 private StorageType storageType; 108 private State state; 109 110 private long capacity; 111 private long dfsUsed; 112 private long nonDfsUsed; 113 private volatile long remaining; 114 private long blockPoolUsed; 115 116 private volatile BlockInfoContiguous blockList = null; 117 private int numBlocks = 0; 118 119 /** The number of block reports received */ 120 private int blockReportCount = 0; 121 122 /** 123 * Set to false on any NN failover, and reset to true 124 * whenever a block report is received. 125 */ 126 private boolean heartbeatedSinceFailover = false; 127 128 /** 129 * At startup or at failover, the storages in the cluster may have pending 130 * block deletions from a previous incarnation of the NameNode. The block 131 * contents are considered as stale until a block report is received. When a 132 * storage is considered as stale, the replicas on it are also considered as 133 * stale. If any block has at least one stale replica, then no invalidations 134 * will be processed for this block. See HDFS-1972. 
135 */ 136 private boolean blockContentsStale = true; 137 138 DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) { 139 this.dn = dn; 140 this.storageID = s.getStorageID(); 141 this.storageType = s.getStorageType(); 142 this.state = s.getState(); 143 } 144 145 public int getBlockReportCount() { 146 return blockReportCount; 147 } 148 149 void setBlockReportCount(int blockReportCount) { 150 this.blockReportCount = blockReportCount; 151 } 152 153 boolean areBlockContentsStale() { 154 return blockContentsStale; 155 } 156 157 void markStaleAfterFailover() { 158 heartbeatedSinceFailover = false; 159 blockContentsStale = true; 160 } 161 162 void receivedHeartbeat(StorageReport report) { 163 updateState(report); 164 heartbeatedSinceFailover = true; 165 } 166 167 void receivedBlockReport() { 168 if (heartbeatedSinceFailover) { 169 blockContentsStale = false; 170 } 171 blockReportCount++; 172 } 173 174 @VisibleForTesting 175 public void setUtilizationForTesting(long capacity, long dfsUsed, 176 long remaining, long blockPoolUsed) { 177 this.capacity = capacity; 178 this.dfsUsed = dfsUsed; 179 this.remaining = remaining; 180 this.blockPoolUsed = blockPoolUsed; 181 } 182 183 State getState() { 184 return this.state; 185 } 186 187 void setState(State state) { 188 this.state = state; 189 } 190 191 boolean areBlocksOnFailedStorage() { 192 return getState() == State.FAILED && numBlocks != 0; 193 } 194 195 public String getStorageID() { 196 return storageID; 197 } 198 199 public StorageType getStorageType() { 200 return storageType; 201 } 202 203 long getCapacity() { 204 return capacity; 205 } 206 207 long getDfsUsed() { 208 return dfsUsed; 209 } 210 211 long getNonDfsUsed() { 212 return nonDfsUsed; 213 } 214 long getRemaining() { 215 return remaining; 216 } 217 218 long getBlockPoolUsed() { 219 return blockPoolUsed; 220 } 221 222 public AddBlockResult addBlock(BlockInfoContiguous b) { 223 // First check whether the block belongs to a different storage 224 // on the same 
DN. 225 AddBlockResult result = AddBlockResult.ADDED; 226 DatanodeStorageInfo otherStorage = 227 b.findStorageInfo(getDatanodeDescriptor()); 228 229 if (otherStorage != null) { 230 if (otherStorage != this) { 231 // The block belongs to a different storage. Remove it first. 232 otherStorage.removeBlock(b); 233 result = AddBlockResult.REPLACED; 234 } else { 235 // The block is already associated with this storage. 236 return AddBlockResult.ALREADY_EXIST; 237 } 238 } 239 240 // add to the head of the data-node list 241 b.addStorage(this); 242 blockList = b.listInsert(blockList, this); 243 numBlocks++; 244 return result; 245 } 246 247 public boolean removeBlock(BlockInfoContiguous b) { 248 blockList = b.listRemove(blockList, this); 249 if (b.removeStorage(this)) { 250 numBlocks--; 251 return true; 252 } else { 253 return false; 254 } 255 } 256 257 int numBlocks() { 258 return numBlocks; 259 } 260 261 Iterator<BlockInfoContiguous> getBlockIterator() { 262 return new BlockIterator(blockList); 263 264 } 265 266 /** 267 * Move block to the head of the list of blocks belonging to the data-node. 268 * @return the index of the head of the blockList 269 */ 270 int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) { 271 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex); 272 return curIndex; 273 } 274 275 /** 276 * Used for testing only 277 * @return the head of the blockList 278 */ 279 @VisibleForTesting 280 BlockInfoContiguous getBlockListHeadForTesting(){ 281 return blockList; 282 } 283 284 void updateState(StorageReport r) { 285 capacity = r.getCapacity(); 286 dfsUsed = r.getDfsUsed(); 287 nonDfsUsed = r.getNonDfsUsed(); 288 remaining = r.getRemaining(); 289 blockPoolUsed = r.getBlockPoolUsed(); 290 } 291 292 public DatanodeDescriptor getDatanodeDescriptor() { 293 return dn; 294 } 295 296 /** Increment the number of blocks scheduled for each given storage */ 297 public static void incrementBlocksScheduled(DatanodeStorageInfo... 
storages) { 298 for (DatanodeStorageInfo s : storages) { 299 s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType()); 300 } 301 } 302 303 @Override 304 public boolean equals(Object obj) { 305 if (this == obj) { 306 return true; 307 } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) { 308 return false; 309 } 310 final DatanodeStorageInfo that = (DatanodeStorageInfo)obj; 311 return this.storageID.equals(that.storageID); 312 } 313 314 @Override 315 public int hashCode() { 316 return storageID.hashCode(); 317 } 318 319 @Override 320 public String toString() { 321 return "[" + storageType + "]" + storageID + ":" + state + ":" + dn; 322 } 323 324 StorageReport toStorageReport() { 325 return new StorageReport(new DatanodeStorage(storageID, state, storageType), 326 false, capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed); 327 } 328 329 static Iterable<StorageType> toStorageTypes( 330 final Iterable<DatanodeStorageInfo> infos) { 331 return new Iterable<StorageType>() { 332 @Override 333 public Iterator<StorageType> iterator() { 334 return new Iterator<StorageType>() { 335 final Iterator<DatanodeStorageInfo> i = infos.iterator(); 336 @Override 337 public boolean hasNext() {return i.hasNext();} 338 @Override 339 public StorageType next() {return i.next().getStorageType();} 340 @Override 341 public void remove() { 342 throw new UnsupportedOperationException(); 343 } 344 }; 345 } 346 }; 347 } 348 349 /** @return the first {@link DatanodeStorageInfo} corresponding to 350 * the given datanode 351 */ 352 static DatanodeStorageInfo getDatanodeStorageInfo( 353 final Iterable<DatanodeStorageInfo> infos, 354 final DatanodeDescriptor datanode) { 355 if (datanode == null) { 356 return null; 357 } 358 for(DatanodeStorageInfo storage : infos) { 359 if (storage.getDatanodeDescriptor() == datanode) { 360 return storage; 361 } 362 } 363 return null; 364 } 365 366 static enum AddBlockResult { 367 ADDED, REPLACED, ALREADY_EXIST; 368 } 369}