/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.io.IOUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * This class is used by datanodes to maintain meta data of its replicas.
 * It provides a general interface for meta information of a replica.
 */
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {

  /** volume where the replica belongs */
  private FsVolumeSpi volume;

  /**
   * Base directory containing numerically-identified sub directories and
   * possibly blocks.
   */
  private File baseDir;

  /**
   * Whether or not this replica's parent directory includes subdirs, in which
   * case we can generate them based on the replica's block ID
   */
  private boolean hasSubdirs;

  /**
   * Interned base-directory {@link File} objects keyed by absolute path.
   * Many replicas share the same base directory; interning lets them all
   * hold one shared File instead of one File (and its backing char[]) each.
   * All access is synchronized on the map itself.
   */
  private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();

  /**
   * Constructor
   * @param block a block
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
    this(block.getBlockId(), block.getNumBytes(),
        block.getGenerationStamp(), vol, dir);
  }

  /**
   * Constructor
   * @param blockId block id
   * @param len replica length
   * @param genStamp replica generation stamp
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(long blockId, long len, long genStamp,
      FsVolumeSpi vol, File dir) {
    super(blockId, len, genStamp);
    this.volume = vol;
    setDirInternal(dir);
  }

  /**
   * Copy constructor.
   * @param from where to copy from
   */
  ReplicaInfo(ReplicaInfo from) {
    this(from, from.getVolume(), from.getDir());
  }

  /**
   * Get the full path of this replica's data file
   * @return the full path of this replica's data file
   */
  public File getBlockFile() {
    return new File(getDir(), getBlockName());
  }

  /**
   * Get the full path of this replica's meta file
   * @return the full path of this replica's meta file
   */
  public File getMetaFile() {
    return new File(getDir(),
        DatanodeUtil.getMetaName(getBlockName(), getGenerationStamp()));
  }

  /**
   * Get the volume where this replica is located on disk
   * @return the volume where this replica is located on disk
   */
  public FsVolumeSpi getVolume() {
    return volume;
  }

  /**
   * Set the volume where this replica is located on disk
   */
  void setVolume(FsVolumeSpi vol) {
    this.volume = vol;
  }

  /**
   * Get the storageUuid of the volume that stores this replica.
   */
  @Override
  public String getStorageUuid() {
    return volume.getStorageID();
  }

  /**
   * Return the parent directory path where this replica is located
   * @return the parent directory path where this replica is located
   */
  File getDir() {
    // When the parent has subdirs, the concrete directory is derived from
    // the block ID rather than stored, so equal-base-dir replicas can share
    // one interned baseDir File.
    return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
        getBlockId()) : baseDir;
  }

  /**
   * Set the parent directory where this replica is located
   * @param dir the parent directory where the replica is located
   */
  public void setDir(File dir) {
    setDirInternal(dir);
  }

  /**
   * Record the replica's directory as an interned base dir plus a
   * "has subdirs" flag (see {@link #getDir()}).
   * @param dir the parent directory of the replica, or null to clear it
   */
  private void setDirInternal(File dir) {
    if (dir == null) {
      // Clear both fields together; leaving hasSubdirs stale would make
      // getDir() derive a bogus path from a null base dir.
      baseDir = null;
      hasSubdirs = false;
      return;
    }

    ReplicaDirInfo dirInfo = parseBaseDir(dir);
    this.hasSubdirs = dirInfo.hasSubidrs;

    synchronized (internedBaseDirs) {
      File interned = internedBaseDirs.get(dirInfo.baseDirPath);
      if (interned == null) {
        // Create a new String path of this file and make a brand new File
        // object to guarantee we drop the reference to the underlying
        // char[] storage.
        interned = new File(dirInfo.baseDirPath);
        internedBaseDirs.put(dirInfo.baseDirPath, interned);
      }
      this.baseDir = interned;
    }
  }

  /**
   * Result of {@link #parseBaseDir(File)}: the base directory's absolute
   * path and whether the original path descended into "subdir*" children.
   * Note: the field name "hasSubidrs" is a historical typo kept for
   * compatibility with existing (test) callers of this public field.
   */
  @VisibleForTesting
  public static class ReplicaDirInfo {
    public String baseDirPath;
    public boolean hasSubidrs;

    public ReplicaDirInfo (String baseDirPath, boolean hasSubidrs) {
      this.baseDirPath = baseDirPath;
      this.hasSubidrs = hasSubidrs;
    }
  }

  /**
   * Walk upward out of any "subdir*" components of the given directory to
   * find the base directory, noting whether any such components were seen.
   * @param dir a replica directory, possibly nested inside subdirs
   * @return the base dir path and the subdir flag
   */
  @VisibleForTesting
  public static ReplicaDirInfo parseBaseDir(File dir) {

    File currentDir = dir;
    boolean hasSubdirs = false;
    while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
      hasSubdirs = true;
      currentDir = currentDir.getParentFile();
    }

    return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
  }

  /**
   * check if this replica has already been unlinked.
   * @return true if the replica has already been unlinked
   *         or no need to be detached; false otherwise
   */
  public boolean isUnlinked() {
    return true;                // no need to be unlinked
  }

  /**
   * set that this replica is unlinked
   */
  public void setUnlinked() {
    // no need to be unlinked
  }

  /**
   * Number of bytes reserved for this replica on disk.
   */
  public long getBytesReserved() {
    return 0;
  }

  /**
   * Copy specified file into a temporary file. Then rename the
   * temporary file to the original name. This will cause any
   * hardlinks to the original file to be removed. The temporary
   * files are created in the same directory. The temporary files will
   * be recovered (especially on Windows) on datanode restart.
   */
  private void unlinkFile(File file, Block b) throws IOException {
    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
    try {
      FileInputStream in = new FileInputStream(file);
      try {
        FileOutputStream out = new FileOutputStream(tmpFile);
        try {
          IOUtils.copyBytes(in, out, 16*1024);
        } finally {
          out.close();
        }
      } finally {
        in.close();
      }
      // Sanity-check the copy before replacing the original.
      if (file.length() != tmpFile.length()) {
        throw new IOException("Copy of file " + file + " size " + file.length()+
                              " into file " + tmpFile +
                              " resulted in a size of " + tmpFile.length());
      }
      FileUtil.replaceFile(tmpFile, file);
    } catch (IOException e) {
      // Best-effort cleanup of the temp file; the original IOException is
      // still propagated to the caller.
      boolean done = tmpFile.delete();
      if (!done) {
        DataNode.LOG.info("detachFile failed to delete temporary file " +
                          tmpFile);
      }
      throw e;
    }
  }

  /**
   * Remove a hard link by copying the block to a temporary place and
   * then moving it back
   * @param numLinks number of hard links
   * @return true if copy is successful;
   *         false if it is already detached or no need to be detached
   * @throws IOException if there is any copy error
   */
  public boolean unlinkBlock(int numLinks) throws IOException {
    if (isUnlinked()) {
      return false;
    }
    File file = getBlockFile();
    if (file == null || getVolume() == null) {
      throw new IOException("detachBlock:Block not found. " + this);
    }
    File meta = getMetaFile();

    // Only copy-on-write when the file actually has more links than allowed;
    // block and meta files are checked (and unlinked) independently.
    if (HardLink.getLinkCount(file) > numLinks) {
      DataNode.LOG.info("CopyOnWrite for block " + this);
      unlinkFile(file, this);
    }
    if (HardLink.getLinkCount(meta) > numLinks) {
      unlinkFile(meta, this);
    }
    setUnlinked();
    return true;
  }

  @Override  //Object
  public String toString() {
    return getClass().getSimpleName()
        + ", " + super.toString()
        + ", " + getState()
        + "\n  getNumBytes()     = " + getNumBytes()
        + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
        + "\n  getVisibleLength()= " + getVisibleLength()
        + "\n  getVolume()       = " + getVolume()
        + "\n  getBlockFile()    = " + getBlockFile();
  }

  @Override
  public boolean isOnTransientStorage() {
    return volume.isTransientStorage();
  }
}