001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hdfs.server.namenode.snapshot; 019 020import java.util.Collections; 021import java.util.List; 022 023import org.apache.hadoop.hdfs.protocol.Block; 024import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; 025import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; 026import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; 027import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; 028import org.apache.hadoop.hdfs.server.namenode.INode; 029import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; 030import org.apache.hadoop.hdfs.server.namenode.INodeFile; 031import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes; 032 033/** A list of FileDiffs for storing snapshot data. 
 */
public class FileDiffList extends
    AbstractINodeDiffList<INodeFile, INodeFileAttributes, FileDiff> {

  /** Creates a diff entry recording the file's state at the given snapshot. */
  @Override
  FileDiff createDiff(int snapshotId, INodeFile file) {
    return new FileDiff(snapshotId, file);
  }

  /** Captures the current attributes of the file for storage in a diff. */
  @Override
  INodeFileAttributes createSnapshotCopy(INodeFile currentINode) {
    return new INodeFileAttributes.SnapshotCopy(currentINode);
  }

  /**
   * Destroys every diff in this list, adding the blocks each diff references
   * to {@code collectedBlocks} so the caller can remove them from the
   * blocks map.
   *
   * @param collectedBlocks accumulator for blocks to delete
   */
  public void destroyAndCollectSnapshotBlocks(
      BlocksMapUpdateInfo collectedBlocks) {
    for(FileDiff d : asList())
      d.destroyAndCollectSnapshotBlocks(collectedBlocks);
  }

  /**
   * Saves the current file state into the diff for the latest snapshot,
   * optionally recording the file's current block list as well.
   *
   * NOTE(review): the diff returned by the superclass is used without a null
   * check; presumably saveSelf2Snapshot never returns null when
   * latestSnapshotId is a real snapshot — confirm against
   * AbstractINodeDiffList.
   *
   * @param latestSnapshotId id of the latest snapshot taken of the file
   * @param iNodeFile the file being modified
   * @param snapshotCopy attribute copy to store in the diff
   * @param withBlocks if true, also store the file's current block list
   */
  public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
      INodeFileAttributes snapshotCopy, boolean withBlocks) {
    final FileDiff diff =
        super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
    if(withBlocks)  // Store blocks if this is the first update
      diff.setBlocks(iNodeFile.getBlocks());
  }

  /**
   * Returns the block list of the latest diff at or before the given
   * snapshot id that has blocks recorded, or null if no such diff exists.
   *
   * NOTE(review): binarySearch compares FileDiff elements against the
   * Integer snapshotId — this relies on the diffs being ordered by snapshot
   * id and on the compareTo defined by the diff class; verify in
   * AbstractINodeDiff.
   *
   * @param snapshotId the snapshot to search from; must not be
   *        NO_SNAPSHOT_ID, and CURRENT_STATE_ID yields null
   */
  public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if(snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    List<FileDiff> diffs = this.asList();
    int i = Collections.binarySearch(diffs, snapshotId);
    BlockInfoContiguous[] blocks = null;
    // On a miss binarySearch returns -(insertionPoint) - 1, so -i-2 is the
    // index of the last diff strictly before snapshotId. Walk backwards
    // until a diff with a recorded block list is found.
    for(i = i >= 0 ? i : -i-2; i >= 0; i--) {
      blocks = diffs.get(i).getBlocks();
      if(blocks != null) {
        break;
      }
    }
    return blocks;
  }

  /**
   * Returns the block list of the earliest diff strictly after the given
   * snapshot id that has blocks recorded, or null if no such diff exists.
   *
   * @param snapshotId the snapshot to search from; must not be
   *        NO_SNAPSHOT_ID, and CURRENT_STATE_ID yields null
   */
  public BlockInfoContiguous[] findLaterSnapshotBlocks(int snapshotId) {
    assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
    if(snapshotId == Snapshot.CURRENT_STATE_ID) {
      return null;
    }
    List<FileDiff> diffs = this.asList();
    int i = Collections.binarySearch(diffs, snapshotId);
    BlockInfoContiguous[] blocks = null;
    // On a hit start just after the match (i+1); on a miss -i-1 is the
    // insertion point, i.e. the first diff after snapshotId. Walk forward
    // until a diff with a recorded block list is found.
    for(i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
      blocks = diffs.get(i).getBlocks();
      if(blocks != null) {
        break;
      }
    }
    return blocks;
  }

  /**
   * Copy blocks from the removed snapshot into the previous snapshot
   * up to the file length of the latter.
   * Collect unused blocks of the removed snapshot.
   *
   * @param bsps storage policy suite, forwarded to collectBlocksAndClear
   * @param file the file whose snapshot diff is being removed
   * @param removed the diff being removed from this list
   * @param collectedBlocks accumulator for blocks to delete
   * @param removedINodes accumulator for inodes to remove
   */
  void combineAndCollectSnapshotBlocks(BlockStoragePolicySuite bsps,
      INodeFile file,
      FileDiff removed,
      BlocksMapUpdateInfo collectedBlocks,
      List<INode> removedINodes) {
    BlockInfoContiguous[] removedBlocks = removed.getBlocks();
    // The removed diff recorded no block list of its own: nothing to
    // combine. If the current file is already deleted, clear its blocks.
    if(removedBlocks == null) {
      FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
      assert sf != null : "FileWithSnapshotFeature is null";
      if(sf.isCurrentFileDeleted())
        sf.collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
      return;
    }
    int p = getPrior(removed.getSnapshotId(), true);
    FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
    // Copy blocks to the previous snapshot if not set already
    // NOTE(review): setBlocks is invoked unconditionally here; presumably
    // FileDiff.setBlocks is a no-op when the diff already has blocks,
    // matching the comment above — confirm in FileDiff.
    if(earlierDiff != null)
      earlierDiff.setBlocks(removedBlocks);
    BlockInfoContiguous[] earlierBlocks =
        (earlierDiff == null ? new BlockInfoContiguous[]{} : earlierDiff.getBlocks());
    // Find later snapshot (or file itself) with blocks
    BlockInfoContiguous[] laterBlocks =
        findLaterSnapshotBlocks(removed.getSnapshotId());
    laterBlocks = (laterBlocks==null) ? file.getBlocks() : laterBlocks;
    // Skip blocks, which belong to either the earlier or the later lists.
    // Reference equality (==) is intentional: the same BlockInfoContiguous
    // instance is shared between diffs, so identity marks a shared block.
    int i = 0;
    for(; i < removedBlocks.length; i++) {
      if(i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i])
        continue;
      if(i < laterBlocks.length && removedBlocks[i] == laterBlocks[i])
        continue;
      break;
    }
    // Check if last block is part of truncate recovery
    BlockInfoContiguous lastBlock = file.getLastBlock();
    Block dontRemoveBlock = null;
    if(lastBlock != null && lastBlock.getBlockUCState().equals(
        HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
          .getTruncateBlock();
    }
    // Collect the remaining blocks of the file, ignoring truncate block
    for(;i < removedBlocks.length; i++) {
      if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
        collectedBlocks.addDeleteBlock(removedBlocks[i]);
      }
    }
  }
}