/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;

/**
 * Loads and saves the inode-related sections of the protobuf-based fsimage:
 * the {@code INodeSection}, the {@code INodeDirectorySection} (parent/child
 * links) and the {@code FilesUnderConstructionSection}.
 *
 * <p>The nested static {@link Loader} and {@link Saver} classes are the two
 * halves of the format; the bit-packing constants below define how
 * permissions, ACL entries and xattr names are compacted into fixed-width
 * integers (see the comments in fsimage.proto for the authoritative layout).
 * This is a pure utility holder and is never instantiated.
 */
@InterfaceAudience.Private
public final class FSImageFormatPBINode {
  // A PermissionStatus is packed into one long:
  //   bits 40..63 = user string-table id, bits 16..39 = group string-table id,
  //   bits 0..15  = FsPermission mode bits.
  // (Derived from loadPermission()/buildPermissionStatus() below.)
  private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
  private final static int USER_STRID_OFFSET = 40;
  private final static int GROUP_STRID_OFFSET = 16;
  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);

  // An ACL entry is packed into one int:
  //   bits 6..29 = name string-table id, bit 5 = scope, bits 3..4 = type,
  //   bits 0..2  = permission (FsAction ordinal).
  private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
  private static final int ACL_ENTRY_NAME_OFFSET = 6;
  private static final int ACL_ENTRY_TYPE_OFFSET = 3;
  private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
  private static final int ACL_ENTRY_PERM_MASK = 7;
  private static final int ACL_ENTRY_TYPE_MASK = 3;
  private static final int ACL_ENTRY_SCOPE_MASK = 1;
  // Ordinal -> enum lookup tables, cached once; the packed ints store
  // ordinals, so these arrays decode them without calling values() per entry.
  private static final FsAction[] FSACTION_VALUES = FsAction.values();
  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
      .values();
  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
      .values();

  // An XAttr name is packed into one int:
  //   bits 30..31 = low 2 bits of the namespace ordinal,
  //   bits 6..29  = name string-table id,
  //   bit 5       = third (extension) bit of the namespace ordinal.
  private static final int XATTR_NAMESPACE_MASK = 3;
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;

  /* See the comments in fsimage.proto for an explanation of the following. */
  private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
  private static final int XATTR_NAMESPACE_EXT_MASK = 1;

  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();


  /**
   * Deserializes the inode sections of an fsimage and rebuilds the in-memory
   * namespace (inode map, directory tree, blocks map and lease map).
   */
  public final static class Loader {
    /**
     * Decodes a packed 64-bit permission word (see the bit layout comment on
     * the class constants) into a {@link PermissionStatus}, resolving user and
     * group names through the image's string table.
     */
    public static PermissionStatus loadPermission(long id,
        final String[] stringTable) {
      short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
      int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      return new PermissionStatus(stringTable[usid], stringTable[gsid],
          new FsPermission(perm));
    }

    /**
     * Decodes the packed-int ACL entries of an {@link AclFeatureProto} into
     * {@link AclEntry} objects (names resolved via the string table).
     */
    public static ImmutableList<AclEntry> loadAclEntries(
        AclFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
      for (int v : proto.getEntriesList()) {
        int p = v & ACL_ENTRY_PERM_MASK;
        int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
        int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
        int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
        String name = stringTable[nid];
        b.add(new AclEntry.Builder().setName(name)
            .setPermission(FSACTION_VALUES[p])
            .setScope(ACL_ENTRY_SCOPE_VALUES[s])
            .setType(ACL_ENTRY_TYPE_VALUES[t]).build());
      }
      return b.build();
    }

    /**
     * Decodes an {@link XAttrFeatureProto} into {@link XAttr}s. The namespace
     * ordinal is reassembled from its 2 low bits plus the 1 extension bit
     * packed into the compact name field (see fsimage.proto).
     */
    public static ImmutableList<XAttr> loadXAttrs(
        XAttrFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
      for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
        int v = xAttrCompactProto.getName();
        int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
        // Prepend the extension bit as bit 2 of the namespace ordinal.
        ns |=
            ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
        String name = stringTable[nid];
        byte[] value = null;
        if (xAttrCompactProto.getValue() != null) {
          value = xAttrCompactProto.getValue().toByteArray();
        }
        b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
            .setName(name).setValue(value).build());
      }

      return b.build();
    }

    /**
     * Converts the per-storage-type quota protos into
     * {@link QuotaByStorageTypeEntry} objects.
     */
    public static ImmutableList<QuotaByStorageTypeEntry> loadQuotaByStorageTypeEntries(
        QuotaByStorageTypeFeatureProto proto) {
      ImmutableList.Builder<QuotaByStorageTypeEntry> b = ImmutableList.builder();
      for (QuotaByStorageTypeEntryProto quotaEntry : proto.getQuotasList()) {
        StorageType type = PBHelper.convertStorageType(quotaEntry.getStorageType());
        long quota = quotaEntry.getQuota();
        b.add(new QuotaByStorageTypeEntry.Builder().setStorageType(type)
            .setQuota(quota).build());
      }
      return b.build();
    }

    /**
     * Reconstructs an {@link INodeDirectory} from its protobuf form,
     * attaching quota (namespace/space and per-storage-type), ACL and xattr
     * features when present in the image.
     */
    public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
        LoaderContext state) {
      assert n.getType() == INodeSection.INode.Type.DIRECTORY;
      INodeSection.INodeDirectory d = n.getDirectory();

      final PermissionStatus permissions = loadPermission(d.getPermission(),
          state.getStringTable());
      final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
          .toByteArray(), permissions, d.getModificationTime());
      final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
      // A negative quota means "unset"; only add the feature when at least
      // one of the two scalar quotas is actually configured.
      if (nsQuota >= 0 || dsQuota >= 0) {
        dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
            nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
      }
      EnumCounters<StorageType> typeQuotas = null;
      if (d.hasTypeQuotas()) {
        ImmutableList<QuotaByStorageTypeEntry> qes =
            loadQuotaByStorageTypeEntries(d.getTypeQuotas());
        typeQuotas = new EnumCounters<StorageType>(StorageType.class,
            HdfsConstants.QUOTA_RESET);
        for (QuotaByStorageTypeEntry qe : qes) {
          // Ignore unset (<0), null, or quota-unsupported storage types.
          if (qe.getQuota() >= 0 && qe.getStorageType() != null &&
              qe.getStorageType().supportTypeQuota()) {
            typeQuotas.set(qe.getStorageType(), qe.getQuota());
          }
        }

        if (typeQuotas.anyGreaterOrEqual(0)) {
          // The quota feature may already exist from the nsQuota/dsQuota
          // branch above; reuse it rather than adding a second one.
          DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
          if (q == null) {
            dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.
                Builder().typeQuotas(typeQuotas).build());
          } else {
            q.setQuota(typeQuotas);
          }
        }
      }

      if (d.hasAcl()) {
        int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
            d.getAcl(), state.getStringTable()));
        dir.addAclFeature(new AclFeature(entries));
      }
      if (d.hasXAttrs()) {
        dir.addXAttrFeature(new XAttrFeature(
            loadXAttrs(d.getXAttrs(), state.getStringTable())));
      }
      return dir;
    }

    /**
     * Registers each of the file's blocks with the block manager and stores
     * back the canonical {@link BlockInfoContiguous} instances it returns.
     */
    public static void updateBlocksMap(INodeFile file, BlockManager bm) {
      // Add file->block mapping
      final BlockInfoContiguous[] blocks = file.getBlocks();
      if (blocks != null) {
        for (int i = 0; i < blocks.length; i++) {
          file.setBlock(i, bm.addBlockCollection(blocks[i], file));
        }
      }
    }

    private final FSDirectory dir;
    private final FSNamesystem fsn;
    private final FSImageFormatProtobuf.Loader parent;
    // Files seen with an under-construction feature while loading the inode
    // section; used afterwards to rebuild the lease map.
    private final List<INodeFile> ucFiles;

    Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
      this.fsn = fsn;
      this.dir = fsn.dir;
      this.parent = parent;
      this.ucFiles = new ArrayList<INodeFile>();
    }

    /**
     * Reads the INodeDirectorySection and wires every child inode (and inode
     * reference) to its parent directory. Must run after
     * {@link #loadINodeSection} so the inode map is populated.
     */
    void loadINodeDirectorySection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      while (true) {
        INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
            .parseDelimitedFrom(in);
        // note that in is a LimitedInputStream
        if (e == null) {
          break;
        }
        INodeDirectory p = dir.getInode(e.getParent()).asDirectory();
        for (long id : e.getChildrenList()) {
          INode child = dir.getInode(id);
          addToParent(p, child);
        }
        for (int refId : e.getRefChildrenList()) {
          INodeReference ref = refList.get(refId);
          addToParent(p, ref);
        }
      }
    }

    /**
     * Reads the INodeSection: restores the last-allocated inode id, then
     * deserializes every inode, treating the root inode specially.
     */
    void loadINodeSection(InputStream in) throws IOException {
      INodeSection s = INodeSection.parseDelimitedFrom(in);
      fsn.dir.resetLastInodeId(s.getLastInodeId());
      LOG.info("Loading " + s.getNumInodes() + " INodes.");
      for (int i = 0; i < s.getNumInodes(); ++i) {
        INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
        if (p.getId() == INodeId.ROOT_INODE_ID) {
          loadRootINode(p);
        } else {
          INode n = loadINode(p);
          dir.addToInodeMap(n);
        }
      }
    }

    /**
     * Load the under-construction files section, and update the lease map
     */
    void loadFilesUnderConstructionSection(InputStream in) throws IOException {
      // This section is consumed, but not actually used for restoring leases.
      while (true) {
        FileUnderConstructionEntry entry = FileUnderConstructionEntry
            .parseDelimitedFrom(in);
        if (entry == null) {
          break;
        }
      }

      // Add a lease for each and every file under construction.
      for (INodeFile file : ucFiles) {
        FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
        Preconditions.checkState(uc != null); // file must be under-construction
        String path = file.getFullPathName();
        // Skip the deleted files in snapshot. This leaks UC inodes that are
        // deleted from the current view.
        if (path.startsWith("/")) {
          fsn.leaseManager.addLease(uc.getClientName(), path);
        }
      }
    }

    /**
     * Attaches {@code child} to {@code parent}, rejecting reserved names
     * directly under the root, and registers file blocks in the blocks map.
     * Silently skips children the parent refuses to add.
     */
    private void addToParent(INodeDirectory parent, INode child) {
      if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
        throw new HadoopIllegalArgumentException("File name \""
            + child.getLocalName() + "\" is reserved. Please "
            + " change the name of the existing file or directory to another "
            + "name before upgrading to this release.");
      }
      // NOTE: This does not update space counts for parents
      if (!parent.addChild(child)) {
        return;
      }
      dir.cacheName(child);

      if (child.isFile()) {
        updateBlocksMap(child.asFile(), fsn.getBlockManager());
      }
    }

    /**
     * Dispatches on the inode type tag; returns null for unknown types
     * (assumed tolerated by callers — TODO confirm against addToInodeMap).
     */
    private INode loadINode(INodeSection.INode n) {
      switch (n.getType()) {
      case FILE:
        return loadINodeFile(n);
      case DIRECTORY:
        return loadINodeDirectory(n, parent.getLoaderContext());
      case SYMLINK:
        return loadINodeSymlink(n);
      default:
        break;
      }
      return null;
    }

    /**
     * Reconstructs an {@link INodeFile}, including its block list, ACL/xattr
     * features and — when present — the under-construction state, in which
     * case the last block is replaced by an under-construction block and the
     * file is remembered for lease restoration.
     */
    private INodeFile loadINodeFile(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.FILE;
      INodeSection.INodeFile f = n.getFile();
      List<BlockProto> bp = f.getBlocksList();
      short replication = (short) f.getReplication();
      LoaderContext state = parent.getLoaderContext();

      BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
      for (int i = 0, e = bp.size(); i < e; ++i) {
        blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
      }
      final PermissionStatus permissions = loadPermission(f.getPermission(),
          parent.getLoaderContext().getStringTable());

      final INodeFile file = new INodeFile(n.getId(),
          n.getName().toByteArray(), permissions, f.getModificationTime(),
          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
          (byte)f.getStoragePolicyID());

      if (f.hasAcl()) {
        int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
            f.getAcl(), state.getStringTable()));
        file.addAclFeature(new AclFeature(entries));
      }

      if (f.hasXAttrs()) {
        file.addXAttrFeature(new XAttrFeature(
            loadXAttrs(f.getXAttrs(), state.getStringTable())));
      }

      // under-construction information
      if (f.hasFileUC()) {
        ucFiles.add(file);
        INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
        file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
        if (blocks.length > 0) {
          BlockInfoContiguous lastBlk = file.getLastBlock();
          // replace the last block of file
          file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
              lastBlk, replication));
        }
      }
      return file;
    }


    /** Reconstructs an {@link INodeSymlink} from its protobuf form. */
    private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.SYMLINK;
      INodeSection.INodeSymlink s = n.getSymlink();
      final PermissionStatus permissions = loadPermission(s.getPermission(),
          parent.getLoaderContext().getStringTable());
      INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
          permissions, s.getModificationTime(), s.getAccessTime(),
          s.getTarget().toStringUtf8());
      return sym;
    }

    /**
     * Applies the serialized root inode's attributes (quotas, times,
     * permission, ACLs, xattrs) onto the pre-existing in-memory root
     * directory instead of replacing it.
     */
    private void loadRootINode(INodeSection.INode p) {
      INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
      final QuotaCounts q = root.getQuotaCounts();
      final long nsQuota = q.getNameSpace();
      final long dsQuota = q.getStorageSpace();
      if (nsQuota != -1 || dsQuota != -1) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
      }
      final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
      if (typeQuotas.anyGreaterOrEqual(0)) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
      }
      dir.rootDir.cloneModificationTime(root);
      dir.rootDir.clonePermissionStatus(root);
      final AclFeature af = root.getFeature(AclFeature.class);
      if (af != null) {
        dir.rootDir.addAclFeature(af);
      }
      // root dir supports having extended attributes according to POSIX
      final XAttrFeature f = root.getXAttrFeature();
      if (f != null) {
        dir.rootDir.addXAttrFeature(f);
      }
      dir.addRootDirToEncryptionZone(f);
    }
  }

  /**
   * Serializes the in-memory namespace into the inode-related fsimage
   * sections, committing each section to the parent saver's summary.
   */
  public final static class Saver {
    /**
     * Packs user/group string-table ids and the 16 mode bits into one long —
     * the inverse of {@link Loader#loadPermission}.
     */
    private static long buildPermissionStatus(INodeAttributes n,
        final SaverContext.DeduplicationMap<String> stringMap) {
      long userId = stringMap.getId(n.getUserName());
      long groupId = stringMap.getId(n.getGroupName());
      return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
          | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
          | n.getFsPermissionShort();
    }

    /**
     * Packs each ACL entry into one int (name id, type, scope, permission
     * ordinals) — the inverse of {@link Loader#loadAclEntries}.
     */
    private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
        final SaverContext.DeduplicationMap<String> map) {
      AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
      for (int pos = 0, e; pos < f.getEntriesSize(); pos++) {
        e = f.getEntryAt(pos);
        int nameId = map.getId(AclEntryStatusFormat.getName(e));
        int v = ((nameId & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
            | (AclEntryStatusFormat.getType(e).ordinal() << ACL_ENTRY_TYPE_OFFSET)
            | (AclEntryStatusFormat.getScope(e).ordinal() << ACL_ENTRY_SCOPE_OFFSET)
            | (AclEntryStatusFormat.getPermission(e).ordinal());
        b.addEntries(v);
      }
      return b;
    }

    /**
     * Packs each xattr's namespace ordinal (split into 2 low bits plus 1
     * extension bit) and name id into the compact name int — the inverse of
     * {@link Loader#loadXAttrs}. Rejects namespace ordinals >= 8, which
     * cannot be represented in the 3 available bits.
     */
    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
        final SaverContext.DeduplicationMap<String> stringMap) {
      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
        int nsOrd = a.getNameSpace().ordinal();
        Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
                XATTR_NAME_OFFSET);
        v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
            XATTR_NAMESPACE_EXT_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
        }
        b.addXAttrs(xAttrCompactBuilder.build());
      }

      return b;
    }

    /**
     * Emits one quota entry per storage type that has a configured (>= 0)
     * per-type quota.
     */
    private static QuotaByStorageTypeFeatureProto.Builder
        buildQuotaByStorageTypeEntries(QuotaCounts q) {
      QuotaByStorageTypeFeatureProto.Builder b =
          QuotaByStorageTypeFeatureProto.newBuilder();
      for (StorageType t: StorageType.getTypesSupportingQuota()) {
        if (q.getTypeSpace(t) >= 0) {
          QuotaByStorageTypeEntryProto.Builder eb =
              QuotaByStorageTypeEntryProto.newBuilder().
                  setStorageType(PBHelper.convertStorageType(t)).
                  setQuota(q.getTypeSpace(t));
          b.addQuotas(eb);
        }
      }
      return b;
    }

    /**
     * Builds the protobuf body for a file inode: times, permission, block
     * size, replication, storage policy, and optional ACL/xattr features.
     * Blocks and UC state are appended by {@code save(OutputStream,
     * INodeFile)}, not here, so snapshot code can reuse this for attributes.
     */
    public static INodeSection.INodeFile.Builder buildINodeFile(
        INodeFileAttributes file, final SaverContext state) {
      INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
          .setAccessTime(file.getAccessTime())
          .setModificationTime(file.getModificationTime())
          .setPermission(buildPermissionStatus(file, state.getStringMap()))
          .setPreferredBlockSize(file.getPreferredBlockSize())
          .setReplication(file.getFileReplication())
          .setStoragePolicyID(file.getLocalStoragePolicyID());

      AclFeature f = file.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = file.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    /**
     * Builds the protobuf body for a directory inode: modification time,
     * scalar and per-type quotas, permission, and optional ACL/xattr
     * features.
     */
    public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
        INodeDirectoryAttributes dir, final SaverContext state) {
      QuotaCounts quota = dir.getQuotaCounts();
      INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
          .newBuilder().setModificationTime(dir.getModificationTime())
          .setNsQuota(quota.getNameSpace())
          .setDsQuota(quota.getStorageSpace())
          .setPermission(buildPermissionStatus(dir, state.getStringMap()));

      if (quota.getTypeSpaces().anyGreaterOrEqual(0)) {
        b.setTypeQuotas(buildQuotaByStorageTypeEntries(quota));
      }

      AclFeature f = dir.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = dir.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    private final FSNamesystem fsn;
    private final FileSummary.Builder summary;
    private final SaveNamespaceContext context;
    private final FSImageFormatProtobuf.Saver parent;

    Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
      this.parent = parent;
      this.summary = summary;
      this.context = parent.getContext();
      this.fsn = context.getSourceNamesystem();
    }

    /**
     * Writes the INodeDirectorySection: one DirEntry per non-empty directory
     * listing its children by id, with inode references recorded in the
     * shared ref list and addressed by index. Checks for cancellation
     * periodically.
     */
    void serializeINodeDirectorySection(OutputStream out) throws IOException {
      Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
          .getINodeMap().getMapIterator();
      final ArrayList<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      int i = 0;
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        if (!n.isDirectory()) {
          continue;
        }

        ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
            Snapshot.CURRENT_STATE_ID);
        if (children.size() > 0) {
          INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
              DirEntry.newBuilder().setParent(n.getId());
          for (INode inode : children) {
            if (!inode.isReference()) {
              b.addChildren(inode.getId());
            } else {
              // References are serialized separately; store the index into
              // the shared ref list instead of an inode id.
              refList.add(inode.asReference());
              b.addRefChildren(refList.size() - 1);
            }
          }
          INodeDirectorySection.DirEntry e = b.build();
          e.writeDelimitedTo(out);
        }

        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.INODE_DIR);
    }

    /**
     * Writes the INodeSection header (last inode id, inode count) followed by
     * every inode in the inode map. Checks for cancellation periodically.
     */
    void serializeINodeSection(OutputStream out) throws IOException {
      INodeMap inodesMap = fsn.dir.getINodeMap();

      INodeSection.Builder b = INodeSection.newBuilder()
          .setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
      INodeSection s = b.build();
      s.writeDelimitedTo(out);

      int i = 0;
      Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        save(out, n);
        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
    }

    /**
     * Writes one (inode id, full path) entry per file currently under
     * construction, taken from the lease manager's view in the namesystem.
     */
    void serializeFilesUCSection(OutputStream out) throws IOException {
      Map<String, INodeFile> ucMap = fsn.getFilesUnderConstruction();
      for (Map.Entry<String, INodeFile> entry : ucMap.entrySet()) {
        String path = entry.getKey();
        INodeFile file = entry.getValue();
        FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
            .newBuilder().setInodeId(file.getId()).setFullPath(path);
        FileUnderConstructionEntry e = b.build();
        e.writeDelimitedTo(out);
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.FILES_UNDERCONSTRUCTION);
    }

    /** Dispatches to the type-specific save overload. */
    private void save(OutputStream out, INode n) throws IOException {
      if (n.isDirectory()) {
        save(out, n.asDirectory());
      } else if (n.isFile()) {
        save(out, n.asFile());
      } else if (n.isSymlink()) {
        save(out, n.asSymlink());
      }
    }

    /** Serializes one directory inode as a delimited INode message. */
    private void save(OutputStream out, INodeDirectory n) throws IOException {
      INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
          parent.getSaverContext());
      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
      r.writeDelimitedTo(out);
    }

    /**
     * Serializes one file inode as a delimited INode message, appending its
     * block list and, if present, the under-construction feature (client name
     * and machine).
     */
    private void save(OutputStream out, INodeFile n) throws IOException {
      INodeSection.INodeFile.Builder b = buildINodeFile(n,
          parent.getSaverContext());

      if (n.getBlocks() != null) {
        for (Block block : n.getBlocks()) {
          b.addBlocks(PBHelper.convert(block));
        }
      }

      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
            INodeSection.FileUnderConstructionFeature
            .newBuilder().setClientName(uc.getClientName())
            .setClientMachine(uc.getClientMachine()).build();
        b.setFileUC(f);
      }

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.FILE).setFile(b).build();
      r.writeDelimitedTo(out);
    }

    /** Serializes one symlink inode as a delimited INode message. */
    private void save(OutputStream out, INodeSymlink n) throws IOException {
      SaverContext state = parent.getSaverContext();
      INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
          .newBuilder()
          .setPermission(buildPermissionStatus(n, state.getStringMap()))
          .setTarget(ByteString.copyFrom(n.getSymlink()))
          .setModificationTime(n.getModificationTime())
          .setAccessTime(n.getAccessTime());

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
      r.writeDelimitedTo(out);
    }

    /** Builds the fields shared by all inode types: id and local name. */
    private final INodeSection.INode.Builder buildINodeCommon(INode n) {
      return INodeSection.INode.newBuilder()
          .setId(n.getId())
          .setName(ByteString.copyFrom(n.getLocalNameBytes()));
    }
  }

  // Utility holder: never instantiated.
  private FSImageFormatPBINode() {
  }
}