001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018package org.apache.hadoop.hdfs.server.namenode; 019 020import org.apache.hadoop.HadoopIllegalArgumentException; 021import org.apache.hadoop.fs.PathIsNotDirectoryException; 022import org.apache.hadoop.fs.StorageType; 023import org.apache.hadoop.fs.UnresolvedLinkException; 024import org.apache.hadoop.fs.XAttr; 025import org.apache.hadoop.fs.XAttrSetFlag; 026import org.apache.hadoop.fs.permission.FsAction; 027import org.apache.hadoop.fs.permission.FsPermission; 028import org.apache.hadoop.hdfs.protocol.Block; 029import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; 030import org.apache.hadoop.hdfs.protocol.HdfsConstants; 031import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; 032import org.apache.hadoop.hdfs.protocol.QuotaExceededException; 033import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; 034import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; 035import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; 036import org.apache.hadoop.hdfs.util.EnumCounters; 037import org.apache.hadoop.security.AccessControlException; 038 039import java.io.FileNotFoundException; 
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;

/**
 * Static helpers that implement the attribute-mutating operations of the
 * namenode (permission, owner, times, replication, storage policy and quota).
 *
 * Locking convention visible in this file: the public {@code setXxx} entry
 * points take the {@link FSDirectory} write lock (or read lock for pure
 * reads), perform permission checks, delegate to an
 * {@code unprotectedSetXxx} counterpart, and record the change in the edit
 * log. The {@code unprotectedSetXxx} methods assert that the caller already
 * holds the write lock and never touch the edit log themselves.
 */
public class FSDirAttrOp {

  /**
   * Set the permission of the inode at {@code srcArg}.
   *
   * Only the owner may change permissions ({@code checkOwner}); the edit-log
   * record is written after the write lock is released.
   *
   * @param fsd the directory tree
   * @param srcArg path whose permission is changed
   * @param permission new permission bits
   * @return file status of the target, for audit logging
   * @throws IOException if the path cannot be resolved or the caller is not
   *         the owner
   */
  static HdfsFileStatus setPermission(
      FSDirectory fsd, final String srcArg, FsPermission permission)
      throws IOException {
    String src = srcArg;
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    INodesInPath iip;
    fsd.writeLock();
    try {
      src = fsd.resolvePath(pc, src, pathComponents);
      iip = fsd.getINodesInPath4Write(src);
      fsd.checkOwner(pc, iip);
      unprotectedSetPermission(fsd, src, permission);
    } finally {
      fsd.writeUnlock();
    }
    // Logged outside the lock; src was resolved under the lock above.
    fsd.getEditLog().logSetPermissions(src, permission);
    return fsd.getAuditFileInfo(iip);
  }

  /**
   * Set the owner and/or group of {@code src}.
   *
   * A non-superuser may only "change" the owner to themselves and may only
   * set a group they belong to; either argument may be null to leave that
   * field unchanged (see {@link #unprotectedSetOwner}).
   *
   * @return file status of the target, for audit logging
   * @throws AccessControlException if a non-superuser attempts a disallowed
   *         owner/group change
   */
  static HdfsFileStatus setOwner(
      FSDirectory fsd, String src, String username, String group)
      throws IOException {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    INodesInPath iip;
    fsd.writeLock();
    try {
      src = fsd.resolvePath(pc, src, pathComponents);
      iip = fsd.getINodesInPath4Write(src);
      fsd.checkOwner(pc, iip);
      if (!pc.isSuperUser()) {
        if (username != null && !pc.getUser().equals(username)) {
          throw new AccessControlException("Non-super user cannot change owner");
        }
        if (group != null && !pc.containsGroup(group)) {
          throw new AccessControlException("User does not belong to " + group);
        }
      }
      unprotectedSetOwner(fsd, src, username, group);
    } finally {
      fsd.writeUnlock();
    }
    fsd.getEditLog().logSetOwner(src, username, group);
    return fsd.getAuditFileInfo(iip);
  }

  /**
   * Set modification and/or access time of {@code src}; -1 means "leave
   * unchanged" (see {@link #unprotectedSetTimes}).
   *
   * Rejects atime updates up front when access-time tracking is disabled.
   * Unlike the other setters, the edit-log record is written while still
   * holding the write lock, and only when a time actually changed.
   *
   * @throws IOException if atime is requested but access time is not enabled
   */
  static HdfsFileStatus setTimes(
      FSDirectory fsd, String src, long mtime, long atime)
      throws IOException {
    if (!fsd.isAccessTimeSupported() && atime != -1) {
      throw new IOException(
          "Access time for hdfs is not configured. " +
              " Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY
              + " configuration parameter.");
    }

    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

    INodesInPath iip;
    fsd.writeLock();
    try {
      src = fsd.resolvePath(pc, src, pathComponents);
      iip = fsd.getINodesInPath4Write(src);
      // Write access is required to set access and modification times
      if (fsd.isPermissionEnabled()) {
        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
      }
      final INode inode = iip.getLastINode();
      if (inode == null) {
        throw new FileNotFoundException("File/Directory " + src +
            " does not exist.");
      }
      boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
          iip.getLatestSnapshotId());
      if (changed) {
        fsd.getEditLog().logTimes(src, mtime, atime);
      }
    } finally {
      fsd.writeUnlock();
    }
    return fsd.getAuditFileInfo(iip);
  }

  /**
   * Set the replication factor of the file at {@code src}.
   *
   * Replication is validated against cluster limits before taking the lock.
   * On success the block manager is told the old/new replication so it can
   * schedule replication or deletion work for the file's blocks.
   *
   * @return true if {@code src} resolved to a file (and replication was set),
   *         false if it was not a file
   */
  static boolean setReplication(
      FSDirectory fsd, BlockManager bm, String src, final short replication)
      throws IOException {
    bm.verifyReplication(src, replication, null);
    final boolean isFile;
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    fsd.writeLock();
    try {
      src = fsd.resolvePath(pc, src, pathComponents);
      final INodesInPath iip = fsd.getINodesInPath4Write(src);
      if (fsd.isPermissionEnabled()) {
        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
      }

      final short[] blockRepls = new short[2]; // 0: old, 1: new
      final Block[] blocks = unprotectedSetReplication(fsd, src, replication,
                                                       blockRepls);
      isFile = blocks != null;  // null means target was absent or not a file
      if (isFile) {
        fsd.getEditLog().logSetReplication(src, replication);
        bm.setReplication(blockRepls[0], blockRepls[1], src, blocks);
      }
    } finally {
      fsd.writeUnlock();
    }
    return isFile;
  }

  /**
   * Set the block storage policy on the file or directory at {@code src}.
   *
   * Fails fast if the storage-policy feature is disabled, and validates the
   * policy name against the block manager's policy suite before applying it.
   *
   * @throws HadoopIllegalArgumentException if {@code policyName} is unknown
   */
  static HdfsFileStatus setStoragePolicy(
      FSDirectory fsd, BlockManager bm, String src, final String policyName)
      throws IOException {
    if (!fsd.isStoragePolicyEnabled()) {
      throw new IOException(
          "Failed to set storage policy since "
              + DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
    }
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    INodesInPath iip;
    fsd.writeLock();
    try {
      // NOTE(review): uses the static resolvePath overload, unlike the other
      // setters here — presumably equivalent; confirm against FSDirectory.
      src = FSDirectory.resolvePath(src, pathComponents, fsd);
      iip = fsd.getINodesInPath4Write(src);

      if (fsd.isPermissionEnabled()) {
        fsd.checkPathAccess(pc, iip, FsAction.WRITE);
      }

      // get the corresponding policy and make sure the policy name is valid
      BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
      if (policy == null) {
        throw new HadoopIllegalArgumentException(
            "Cannot find a block policy with the name " + policyName);
      }
      unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
      fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
    } finally {
      fsd.writeUnlock();
    }
    return fsd.getAuditFileInfo(iip);
  }

  /** @return all block storage policies known to the block manager. */
  static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
      throws IOException {
    return bm.getStoragePolicies();
  }

  /**
   * Look up the preferred block size of the file at {@code src}.
   *
   * Read-only: taken under the read lock; requires only traverse permission
   * on the path when permissions are enabled.
   *
   * @throws FileNotFoundException (via INodeFile.valueOf) if {@code src} is
   *         not an existing file
   */
  static long getPreferredBlockSize(FSDirectory fsd, String src)
      throws IOException {
    FSPermissionChecker pc = fsd.getPermissionChecker();
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    fsd.readLock();
    try {
      src = fsd.resolvePath(pc, src, pathComponents);
      final INodesInPath iip = fsd.getINodesInPath(src, false);
      if (fsd.isPermissionEnabled()) {
        fsd.checkTraverse(pc, iip);
      }
      return INodeFile.valueOf(iip.getLastINode(), src)
          .getPreferredBlockSize();
    } finally {
      fsd.readUnlock();
    }
  }

  /**
   * Set the namespace, storagespace and typespace quota for a directory.
   *
   * Superuser-only. Writes the appropriate edit-log record (plain quota or
   * per-storage-type quota) only when {@link #unprotectedSetQuota} reports
   * an actual change.
   *
   * Note: This does not support ".inodes" relative path.
   */
  static void setQuota(FSDirectory fsd, String src, long nsQuota, long ssQuota,
      StorageType type) throws IOException {
    if (fsd.isPermissionEnabled()) {
      FSPermissionChecker pc = fsd.getPermissionChecker();
      pc.checkSuperuserPrivilege();
    }

    fsd.writeLock();
    try {
      INodeDirectory changed = unprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
      if (changed != null) {
        // Log the quota values now on the inode, not the raw arguments:
        // QUOTA_DONT_SET inputs were replaced by the existing values.
        final QuotaCounts q = changed.getQuotaCounts();
        if (type == null) {
          fsd.getEditLog().logSetQuota(src, q.getNameSpace(), q.getStorageSpace());
        } else {
          fsd.getEditLog().logSetQuotaByStorageType(
              src, q.getTypeSpaces().get(type), type);
        }
      }
    } finally {
      fsd.writeUnlock();
    }
  }

  /**
   * Apply a permission change without taking the lock or logging an edit;
   * caller must hold the write lock.
   */
  static void unprotectedSetPermission(
      FSDirectory fsd, String src, FsPermission permissions)
      throws FileNotFoundException, UnresolvedLinkException,
             QuotaExceededException, SnapshotAccessControlException {
    assert fsd.hasWriteLock();
    final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
    final INode inode = inodesInPath.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File does not exist: " + src);
    }
    int snapshotId = inodesInPath.getLatestSnapshotId();
    inode.setPermission(permissions, snapshotId);
  }

  /**
   * Apply an owner/group change without taking the lock or logging an edit;
   * caller must hold the write lock. A null {@code username} or
   * {@code groupname} leaves that field unchanged.
   */
  static void unprotectedSetOwner(
      FSDirectory fsd, String src, String username, String groupname)
      throws FileNotFoundException, UnresolvedLinkException,
             QuotaExceededException, SnapshotAccessControlException {
    assert fsd.hasWriteLock();
    final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
    INode inode = inodesInPath.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File does not exist: " + src);
    }
    if (username != null) {
      // setUser may return a different (copied-on-write) inode for
      // snapshotted paths, so the group change below uses the returned ref.
      inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
    }
    if (groupname != null) {
      inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
    }
  }

  /**
   * Lock-taking wrapper around {@link #unprotectedSetTimes(FSDirectory,
   * INode, long, long, boolean, int)} for callers that already hold an
   * inode rather than a path. Does not write to the edit log.
   *
   * @return true if either time was changed
   */
  static boolean setTimes(
      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
      int latestSnapshotId) throws QuotaExceededException {
    fsd.writeLock();
    try {
      return unprotectedSetTimes(fsd, inode, mtime, atime, force,
          latestSnapshotId);
    } finally {
      fsd.writeUnlock();
    }
  }

  /**
   * Path-based variant of {@link #unprotectedSetTimes(FSDirectory, INode,
   * long, long, boolean, int)}; caller must hold the write lock.
   *
   * @return true if either time was changed
   */
  static boolean unprotectedSetTimes(
      FSDirectory fsd, String src, long mtime, long atime, boolean force)
      throws UnresolvedLinkException, QuotaExceededException {
    assert fsd.hasWriteLock();
    final INodesInPath i = fsd.getINodesInPath(src, true);
    return unprotectedSetTimes(fsd, i.getLastINode(), mtime, atime,
        force, i.getLatestSnapshotId());
  }

  /**
   * See {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
   * long, long, StorageType)}
   * for the contract.
   * Sets quota for a directory.
   * @return INodeDirectory if any of the quotas have changed. null otherwise.
   * @throws FileNotFoundException if the path does not exist.
   * @throws PathIsNotDirectoryException if the path is not a directory.
   * @throws QuotaExceededException if the directory tree size is
   *         greater than the given quota
   * @throws UnresolvedLinkException if a symlink is encountered in src.
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  static INodeDirectory unprotectedSetQuota(
      FSDirectory fsd, String src, long nsQuota, long ssQuota, StorageType type)
      throws FileNotFoundException, PathIsNotDirectoryException,
      QuotaExceededException, UnresolvedLinkException,
      SnapshotAccessControlException, UnsupportedActionException {
    assert fsd.hasWriteLock();
    // sanity check: quotas must be non-negative or one of the two sentinel
    // values (QUOTA_DONT_SET = keep current, QUOTA_RESET = clear)
    if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
         nsQuota != HdfsConstants.QUOTA_RESET) ||
        (ssQuota < 0 && ssQuota != HdfsConstants.QUOTA_DONT_SET &&
         ssQuota != HdfsConstants.QUOTA_RESET)) {
      throw new IllegalArgumentException("Illegal value for nsQuota or " +
          "ssQuota : " + nsQuota + " and " +
          ssQuota);
    }
    // sanity check for quota by storage type: feature must be enabled, and a
    // typed request must not try to set the namespace quota at the same time
    if ((type != null) && (!fsd.isQuotaByStorageTypeEnabled() ||
        nsQuota != HdfsConstants.QUOTA_DONT_SET)) {
      throw new UnsupportedActionException(
          "Failed to set quota by storage type because either" +
          DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY + " is set to " +
          fsd.isQuotaByStorageTypeEnabled() + " or nsQuota value is illegal " +
          nsQuota);
    }

    String srcs = FSDirectory.normalizePath(src);
    final INodesInPath iip = fsd.getINodesInPath4Write(srcs, true);
    INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), srcs);
    if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
      throw new IllegalArgumentException("Cannot clear namespace quota on root.");
    } else { // a directory inode
      final QuotaCounts oldQuota = dirNode.getQuotaCounts();
      final long oldNsQuota = oldQuota.getNameSpace();
      final long oldSsQuota = oldQuota.getStorageSpace();

      // QUOTA_DONT_SET means "retain the current value"
      if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
        nsQuota = oldNsQuota;
      }
      if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
        ssQuota = oldSsQuota;
      }

      // unchanged space/namespace quota
      if (type == null && oldNsQuota == nsQuota && oldSsQuota == ssQuota) {
        return null;
      }

      // unchanged type quota
      if (type != null) {
        EnumCounters<StorageType> oldTypeQuotas = oldQuota.getTypeSpaces();
        if (oldTypeQuotas != null && oldTypeQuotas.get(type) == ssQuota) {
          return null;
        }
      }

      final int latest = iip.getLatestSnapshotId();
      dirNode.recordModification(latest);
      dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, ssQuota, type);
      return dirNode;
    }
  }

  /**
   * Apply a replication change without taking the lock or logging an edit;
   * caller must hold the write lock.
   *
   * Quota accounting: storagespace deltas are charged before an increase
   * (so the operation fails early on quota) and after a decrease (once the
   * effective new replication is known).
   *
   * @param blockRepls if non-null, receives old ([0]) and effective new
   *        ([1]) replication on success
   * @return the file's blocks, or null if {@code src} is not a file
   */
  static Block[] unprotectedSetReplication(
      FSDirectory fsd, String src, short replication, short[] blockRepls)
      throws QuotaExceededException, UnresolvedLinkException,
             SnapshotAccessControlException {
    assert fsd.hasWriteLock();

    final INodesInPath iip = fsd.getINodesInPath4Write(src, true);
    final INode inode = iip.getLastINode();
    if (inode == null || !inode.isFile()) {
      return null;
    }
    INodeFile file = inode.asFile();
    final short oldBR = file.getBlockReplication();

    // before setFileReplication, check for increasing block replication.
    // if replication > oldBR, then newBR == replication.
    // if replication < oldBR, we don't know newBR yet.
    if (replication > oldBR) {
      // consumed/oldBR is the per-replica space; updateCount scales it by
      // the replication change — presumably; confirm updateCount semantics.
      long dsDelta = file.storagespaceConsumed()/oldBR;
      fsd.updateCount(iip, 0L, dsDelta, oldBR, replication, true);
    }

    file.setFileReplication(replication, iip.getLatestSnapshotId());

    final short newBR = file.getBlockReplication();
    // check newBR < oldBR case.
    if (newBR < oldBR) {
      long dsDelta = file.storagespaceConsumed()/newBR;
      fsd.updateCount(iip, 0L, dsDelta, oldBR, newBR, true);
    }

    if (blockRepls != null) {
      blockRepls[0] = oldBR;
      blockRepls[1] = newBR;
    }
    return file.getBlocks();
  }

  /**
   * Apply a storage-policy change without taking the lock or logging an
   * edit; caller must hold the write lock.
   *
   * Files get the policy id stored directly on the inode; directories get it
   * as an xattr (see {@link #setDirStoragePolicy}). Copy-on-create-file
   * policies cannot be set (or replaced) after file creation.
   */
  static void unprotectedSetStoragePolicy(
      FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
      throws IOException {
    assert fsd.hasWriteLock();
    final INode inode = iip.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File/Directory does not exist: "
          + iip.getPath());
    }
    final int snapshotId = iip.getLatestSnapshotId();
    if (inode.isFile()) {
      BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
      if (newPolicy.isCopyOnCreateFile()) {
        throw new HadoopIllegalArgumentException(
            "Policy " + newPolicy + " cannot be set after file creation.");
      }

      BlockStoragePolicy currentPolicy =
          bm.getStoragePolicy(inode.getLocalStoragePolicyID());

      if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
        throw new HadoopIllegalArgumentException(
            "Existing policy " + currentPolicy.getName() +
                " cannot be changed after file creation.");
      }
      inode.asFile().setStoragePolicyID(policyId, snapshotId);
    } else if (inode.isDirectory()) {
      setDirStoragePolicy(fsd, inode.asDirectory(), policyId, snapshotId);
    } else {
      throw new FileNotFoundException(iip.getPath()
          + " is not a file or directory");
    }
  }

  /**
   * Record a directory's storage policy as an xattr, creating or replacing
   * the policy xattr among the inode's existing xattrs.
   */
  private static void setDirStoragePolicy(
      FSDirectory fsd, INodeDirectory inode, byte policyId,
      int latestSnapshotId) throws IOException {
    List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
    XAttr xAttr = BlockStoragePolicySuite.buildXAttr(policyId);
    List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
                                                        Arrays.asList(xAttr),
                                                        EnumSet.of(
                                                            XAttrSetFlag.CREATE,
                                                            XAttrSetFlag.REPLACE));
    XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
  }

  /**
   * Core time-setting logic; caller must hold the write lock.
   *
   * mtime/atime of -1 mean "leave unchanged". Unless {@code force} is set,
   * an atime update within the configured access-time precision of the
   * current value is skipped to avoid churning the edit log.
   *
   * @return true if either time was actually changed
   */
  private static boolean unprotectedSetTimes(
      FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
      int latest) throws QuotaExceededException {
    assert fsd.hasWriteLock();
    boolean status = false;
    if (mtime != -1) {
      inode = inode.setModificationTime(mtime, latest);
      status = true;
    }
    if (atime != -1) {
      long inodeTime = inode.getAccessTime();

      // if the last access time update was within the last precision interval, then
      // no need to store access time
      if (atime <= inodeTime + fsd.getFSNamesystem().getAccessTimePrecision()
          && !force) {
        status = false;
      } else {
        inode.setAccessTime(atime, latest);
        status = true;
      }
    }
    return status;
  }
}