From 14dc2f18cb0c1f97332662429cff1f7d794de94d Mon Sep 17 00:00:00 2001 From: Andreas Joachim Peters Date: Wed, 19 Jun 2024 15:23:20 +0200 Subject: [PATCH] S3: reformat code --- src/XrdS3/XrdS3.cc | 67 ++- src/XrdS3/XrdS3.hh | 9 +- src/XrdS3/XrdS3Action.hh | 3 +- src/XrdS3/XrdS3Api.cc | 4 +- src/XrdS3/XrdS3Api.hh | 14 +- src/XrdS3/XrdS3Auth.cc | 8 +- src/XrdS3/XrdS3Auth.hh | 17 +- src/XrdS3/XrdS3Crypt.cc | 27 +- src/XrdS3/XrdS3Crypt.hh | 8 +- src/XrdS3/XrdS3ErrorResponse.hh | 2 +- src/XrdS3/XrdS3Log.hh | 127 +++--- src/XrdS3/XrdS3ObjectStore.cc | 773 ++++++++++++++++++++++---------- src/XrdS3/XrdS3ObjectStore.hh | 81 ++-- src/XrdS3/XrdS3Req.cc | 6 +- src/XrdS3/XrdS3Req.hh | 1 - src/XrdS3/XrdS3Response.cc | 26 +- src/XrdS3/XrdS3Router.cc | 18 +- src/XrdS3/XrdS3Router.hh | 5 +- src/XrdS3/XrdS3ScopedFsId.hh | 48 +- src/XrdS3/XrdS3Utils.cc | 10 +- src/XrdS3/XrdS3Utils.hh | 4 +- src/XrdS3/XrdS3XAttr.hh | 35 +- src/XrdS3/XrdS3Xml.hh | 2 +- 23 files changed, 820 insertions(+), 475 deletions(-) diff --git a/src/XrdS3/XrdS3.cc b/src/XrdS3/XrdS3.cc index 14a610da4d5..5af377240bb 100644 --- a/src/XrdS3/XrdS3.cc +++ b/src/XrdS3/XrdS3.cc @@ -26,6 +26,7 @@ #include "XrdS3.hh" //------------------------------------------------------------------------------ #include + #include #include //------------------------------------------------------------------------------ @@ -43,10 +44,10 @@ //! XRootD S3 plug-in implementation //------------------------------------------------------------------------------ - namespace S3 { XrdVERSIONINFO(XrdHttpGetExtHandler, HttpS3); -S3Handler* S3Handler::sInstance = nullptr; // for convenience to get access to our logger +S3Handler *S3Handler::sInstance = + nullptr; // for convenience to get access to our logger //------------------------------------------------------------------------------ //! This is the default handler for requests that are not handled by the //! router. It returns a 404 error. @@ -68,7 +69,7 @@ S3Handler::S3Handler(XrdSysError *log, const char *config, XrdOucEnv *myEnv) S3Handler::sInstance = this; mLog.Init(&mErr); - S3::ScopedFsId::Validate(); // verify we can switch filesystem IDs ! + S3::ScopedFsId::Validate(); // verify we can switch filesystem IDs ! 
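//------------------------------------------------------------------------------
// A minimal sketch of the RAII idiom behind S3::ScopedFsId, assuming the Linux
// setfsuid()/setfsgid() calls; the real guard lives in XrdS3ScopedFsId.hh and
// may differ in detail:
//
//   #include <sys/fsuid.h>
//
//   class ScopedFsIdSketch {  // hypothetical name, for illustration only
//    public:
//     ScopedFsIdSketch(uid_t uid, gid_t gid)
//         : prev_uid(setfsuid(uid)), prev_gid(setfsgid(gid)) {}
//     ~ScopedFsIdSketch() {
//       // setfsuid()/setfsgid() return the previous value, so the original
//       // filesystem identity is restored on scope exit
//       setfsuid(prev_uid);
//       setfsgid(prev_gid);
//     }
//
//    private:
//     uid_t prev_uid;
//     gid_t prev_gid;
//   };
//
// Validate() checks once at start-up that such a switch actually works before
// any request handling relies on it.
//------------------------------------------------------------------------------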
if (!ParseConfig(config, *myEnv)) { throw std::runtime_error("Failed to configure the HTTP S3 handler."); @@ -537,62 +538,84 @@ bool S3Handler::ParseConfig(const char *config, XrdOucEnv &env) { const char *val; - mErr.setMsgMask(LogMask::ERROR | LogMask::WARN); // by default don't log so much + mErr.setMsgMask(LogMask::ERROR | + LogMask::WARN); // by default don't log so much while ((val = Config.GetMyFirstWord())) { if (!strcmp("s3.config", val)) { if (!(val = Config.GetWord())) { Config.Close(); - std::cerr << "error: s3.config value not defined" << std::endl; + std::cerr << "error: s3.config value not defined" << std::endl; return false; } mConfig.config_dir = val; } else if (!strcmp("s3.region", val)) { if (!(val = Config.GetWord())) { Config.Close(); - std::cerr << "error: s3.region value not defined" << std::endl; + std::cerr << "error: s3.region value not defined" << std::endl; return false; } mConfig.region = val; } else if (!strcmp("s3.service", val)) { if (!(val = Config.GetWord())) { Config.Close(); - std::cerr << "error: s3.service value not defined" << std::endl; + std::cerr << "error: s3.service value not defined" << std::endl; return false; } mConfig.service = val; } else if (!strcmp("s3.multipart", val)) { if (!(val = Config.GetWord())) { Config.Close(); - std::cerr << "error: s3.multipart value not defined" << std::endl; + std::cerr << "error: s3.multipart value not defined" << std::endl; return false; } mConfig.multipart_upload_dir = val; } else if (!strcmp("s3.trace", val)) { if (!(val = Config.GetWord())) { Config.Close(); - std::cerr << "error: s3.trace value not defined" << std::endl; + std::cerr << "error: s3.trace value not defined" << std::endl; return false; } mConfig.trace = val; mErr.setMsgMask(0); - if (!strcmp(val, "all")) {mErr.setMsgMask(LogMask::ALL);} - else if (!strcmp(val, "error")) {mErr.setMsgMask(LogMask::ERROR);} - else if (!strcmp(val, "warning")) {mErr.setMsgMask(LogMask::ERROR | LogMask::WARN);} - else if (!strcmp(val, "info")) {mErr.setMsgMask(LogMask::ERROR | LogMask::WARN | LogMask::INFO);} - else if (!strcmp(val, "debug")) {mErr.setMsgMask(LogMask::ERROR | LogMask::WARN | LogMask::INFO | LogMask::DEBUG);} - else if (!strcmp(val, "none")) {mErr.setMsgMask(0);} - else { - std::cerr << "error: s3.trace encountered an unknown directive: " << val << std::endl; + if (!strcmp(val, "all")) { + mErr.setMsgMask(LogMask::ALL); + } else if (!strcmp(val, "error")) { + mErr.setMsgMask(LogMask::ERROR); + } else if (!strcmp(val, "warning")) { + mErr.setMsgMask(LogMask::ERROR | LogMask::WARN); + } else if (!strcmp(val, "info")) { + mErr.setMsgMask(LogMask::ERROR | LogMask::WARN | LogMask::INFO); + } else if (!strcmp(val, "debug")) { + mErr.setMsgMask(LogMask::ERROR | LogMask::WARN | LogMask::INFO | + LogMask::DEBUG); + } else if (!strcmp(val, "none")) { + mErr.setMsgMask(0); + } else { + std::cerr << "error: s3.trace encountered an unknown directive: " << val + << std::endl; return false; - } } + } + } } Config.Close(); - if (mConfig.config_dir.empty()) { std::cerr << "error: s3.config not defined in configuration file" << std::endl; } - if (mConfig.service.empty()) { std::cerr << "error: s3.service not defined in configuration file" << std::endl; } - if (mConfig.region.empty()) { std::cerr << "error: s3.region not defined in configuration file" << std::endl; } - if (mConfig.multipart_upload_dir.empty()) { std::cerr << "error: s3.multipar not defined in configuration file" << std::endl; } + if (mConfig.config_dir.empty()) { + std::cerr << "error: s3.config not 
defined in configuration file" + << std::endl; + } + if (mConfig.service.empty()) { + std::cerr << "error: s3.service not defined in configuration file" + << std::endl; + } + if (mConfig.region.empty()) { + std::cerr << "error: s3.region not defined in configuration file" + << std::endl; + } + if (mConfig.multipart_upload_dir.empty()) { + std::cerr << "error: s3.multipar not defined in configuration file" + << std::endl; + } return (!mConfig.config_dir.empty() && !mConfig.service.empty() && !mConfig.region.empty() && !mConfig.multipart_upload_dir.empty()); diff --git a/src/XrdS3/XrdS3.hh b/src/XrdS3/XrdS3.hh index 7e649488864..12e261a1bf2 100644 --- a/src/XrdS3/XrdS3.hh +++ b/src/XrdS3/XrdS3.hh @@ -33,13 +33,12 @@ #include "XrdS3Api.hh" #include "XrdS3Auth.hh" #include "XrdS3Crypt.hh" +#include "XrdS3Log.hh" #include "XrdS3Router.hh" #include "XrdS3Utils.hh" -#include "XrdS3Log.hh" #include "XrdSys/XrdSysError.hh" //------------------------------------------------------------------------------ - namespace S3 { //------------------------------------------------------------------------------ //! \brief S3Handler is a class that implements the XRootD HTTP extension @@ -58,10 +57,10 @@ class S3Handler : public XrdHttpExtHandler { // Abstract method in the base class, but does not seem to be used int Init(const char *cfgfile) override { return 0; } - static S3Handler* sInstance; - static S3Log* Logger() { return sInstance->GetLogger(); } + static S3Handler *sInstance; + static S3Log *Logger() { return sInstance->GetLogger(); } - S3Log* GetLogger() { return &mLog; } + S3Log *GetLogger() { return &mLog; } Context ctx; diff --git a/src/XrdS3/XrdS3Action.hh b/src/XrdS3/XrdS3Action.hh index 09b1558761e..e26f27fab77 100644 --- a/src/XrdS3/XrdS3Action.hh +++ b/src/XrdS3/XrdS3Action.hh @@ -128,5 +128,4 @@ enum class Action { WriteGetObjectResponse, }; -} // namespace S - +} // namespace S3 diff --git a/src/XrdS3/XrdS3Api.cc b/src/XrdS3/XrdS3Api.cc index 439ab303e97..0203f69f775 100644 --- a/src/XrdS3/XrdS3Api.cc +++ b/src/XrdS3/XrdS3Api.cc @@ -20,7 +20,7 @@ // In applying this licence, CERN does not waive the privileges and immunities // granted to it by virtue of its status as an Intergovernmental Organization // or submit itself to any jurisdiction. 
-//------------------------------------------------------------------------------ +//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ #include "XrdS3Api.hh" @@ -28,10 +28,10 @@ #include //------------------------------------------------------------------------------ #include "XrdCks/XrdCksCalcmd5.hh" +#include "XrdS3.hh" #include "XrdS3Auth.hh" #include "XrdS3ErrorResponse.hh" #include "XrdS3Response.hh" -#include "XrdS3.hh" //------------------------------------------------------------------------------ namespace S3 { diff --git a/src/XrdS3/XrdS3Api.hh b/src/XrdS3/XrdS3Api.hh index 95b80661ce6..aae87064a28 100644 --- a/src/XrdS3/XrdS3Api.hh +++ b/src/XrdS3/XrdS3Api.hh @@ -24,11 +24,11 @@ #pragma once #include -#include -#include #include #include +#include #include +#include #include "XrdS3Auth.hh" #include "XrdS3ObjectStore.hh" @@ -46,9 +46,10 @@ namespace S3 { class S3Api { public: S3Api() = default; - S3Api(const std::string &config_path, const std::string &region, const std::string &service, - const std::string &mtpu_path) - : objectStore(config_path, mtpu_path), auth(config_path, region, service) {} + S3Api(const std::string &config_path, const std::string &region, + const std::string &service, const std::string &mtpu_path) + : objectStore(config_path, mtpu_path), + auth(config_path, region, service) {} ~S3Api() = default; @@ -310,10 +311,9 @@ class S3Api { } private: - S3Log* mLog; + S3Log *mLog; S3ObjectStore objectStore; S3Auth auth; }; } // namespace S3 - diff --git a/src/XrdS3/XrdS3Auth.cc b/src/XrdS3/XrdS3Auth.cc index 4023ca38f66..6ed4a9cfd17 100644 --- a/src/XrdS3/XrdS3Auth.cc +++ b/src/XrdS3/XrdS3Auth.cc @@ -4,10 +4,12 @@ //------------------------------------------------------------------------------ #include "XrdS3Auth.hh" + #include "XrdS3.hh" //------------------------------------------------------------------------------ #include #include + #include #include //------------------------------------------------------------------------------ @@ -237,7 +239,9 @@ S3Error S3Auth::VerifySigV4(XrdS3Req &req) { AWS4_ALGORITHM, req.date, canonical_request_hash, sig.credentials); const auto signature = GetSignature(key, sig.credentials, string_to_sign); - S3::S3Handler::Logger()->Log(S3::DEBUG, "VerifySignature", "sign=%s calc-sign=%s", sig.signature.c_str(), signature.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "VerifySignature", + "sign=%s calc-sign=%s", sig.signature.c_str(), + signature.c_str()); if (signature == sig.signature) { return S3Error::None; @@ -413,7 +417,7 @@ S3Auth::S3Auth(const std::filesystem::path &path, std::string region, std::string access_key_id = entry->d_name; auto filepath = keystore / access_key_id; - + auto user_id = S3Utils::GetXattr(filepath, "user"); if (user_id.empty()) { continue; diff --git a/src/XrdS3/XrdS3Auth.hh b/src/XrdS3/XrdS3Auth.hh index 3e93764819d..68a3ac2ef73 100644 --- a/src/XrdS3/XrdS3Auth.hh +++ b/src/XrdS3/XrdS3Auth.hh @@ -25,16 +25,17 @@ #pragma once //------------------------------------------------------------------------------ +#include +#include + #include #include #include #include -#include -#include //------------------------------------------------------------------------------ -#include "XrdS3ErrorResponse.hh" #include "XrdS3Action.hh" #include "XrdS3Crypt.hh" +#include "XrdS3ErrorResponse.hh" #include "XrdS3Req.hh" //------------------------------------------------------------------------------ @@ -88,11
+89,11 @@ class S3Auth { // translate username struct passwd *pwd = getpwnam(id.c_str()); if (pwd == nullptr) { - uid=99; - gid=99; + uid = 99; + gid = 99; } else { - uid = pwd->pw_uid; - gid = pwd->pw_gid; + uid = pwd->pw_uid; + gid = pwd->pw_gid; } } }; @@ -162,5 +163,3 @@ class S3Auth { }; } // namespace S3 - - diff --git a/src/XrdS3/XrdS3Crypt.cc b/src/XrdS3/XrdS3Crypt.cc index dc1f17323ee..6d3a7b71454 100644 --- a/src/XrdS3/XrdS3Crypt.cc +++ b/src/XrdS3/XrdS3Crypt.cc @@ -1,11 +1,32 @@ +//------------------------------------------------------------------------------ +// Copyright (c) 2024 by European Organization for Nuclear Research (CERN) +// Author: Mano Segransan / CERN EOS Project +//------------------------------------------------------------------------------ +// This file is part of the XRootD software suite. +// +// XRootD is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// XRootD is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. // -// Created by segransm on 11/3/23. +// You should have received a copy of the GNU Lesser General Public License +// along with XRootD. If not, see . // +// In applying this licence, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ #include "XrdS3Crypt.hh" //------------------------------------------------------------------------------ #include + #include #include //------------------------------------------------------------------------------ @@ -17,7 +38,7 @@ namespace S3 { //! \brief SHA256 implementation using OpenSSL //------------------------------------------------------------------------------ S3Crypt::S3SHA256::S3SHA256() { - md = (EVP_MD*)EVP_sha256(); + md = (EVP_MD *)EVP_sha256(); if (md == nullptr) { throw std::bad_alloc(); } @@ -86,7 +107,7 @@ S3Crypt::S3SHA256::~S3SHA256() { void S3Crypt::S3SHA256::Init() { EVP_DigestInit_ex2(ctx, nullptr, nullptr); } #endif - + //------------------------------------------------------------------------------ //! \brief Update the digest //! @param src The source buffer diff --git a/src/XrdS3/XrdS3Crypt.hh b/src/XrdS3/XrdS3Crypt.hh index db173e95f95..db4cf09b535 100644 --- a/src/XrdS3/XrdS3Crypt.hh +++ b/src/XrdS3/XrdS3Crypt.hh @@ -69,9 +69,9 @@ class S3Crypt { return digest; } -//------------------------------------------------------------------------------ -//! S3SHA256 - SHA256 hash -//------------------------------------------------------------------------------ + //------------------------------------------------------------------------------ + //! 
S3SHA256 - SHA256 hash + //------------------------------------------------------------------------------ class S3SHA256 { public: S3SHA256(); @@ -141,5 +141,3 @@ class S3Crypt { }; } // namespace S3 - - diff --git a/src/XrdS3/XrdS3ErrorResponse.hh b/src/XrdS3/XrdS3ErrorResponse.hh index 2944ddbf32e..ff6b9243091 100644 --- a/src/XrdS3/XrdS3ErrorResponse.hh +++ b/src/XrdS3/XrdS3ErrorResponse.hh @@ -31,7 +31,7 @@ namespace S3 { //------------------------------------------------------------------------------ -//! \brief S3 error code +//! \brief S3 error code //------------------------------------------------------------------------------ struct S3ErrorCode { std::string code; diff --git a/src/XrdS3/XrdS3Log.hh b/src/XrdS3/XrdS3Log.hh index 5245b6b68c7..1b55620cf54 100644 --- a/src/XrdS3/XrdS3Log.hh +++ b/src/XrdS3/XrdS3Log.hh @@ -23,84 +23,85 @@ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ -#include -#include #include +#include + +#include //------------------------------------------------------------------------------ #include "XrdSys/XrdSysError.hh" //------------------------------------------------------------------------------ #pragma once namespace S3 { - //------------------------------------------------------------------------------ - //! \brief enum defining logging levels - //------------------------------------------------------------------------------ - enum LogMask { - DEBUG = 0x01, - INFO = 0x02, - WARN = 0x04, - ERROR = 0x08, - ALL = 0xff - }; +//------------------------------------------------------------------------------ +//! \brief enum defining logging levels +//------------------------------------------------------------------------------ +enum LogMask { + DEBUG = 0x01, + INFO = 0x02, + WARN = 0x04, + ERROR = 0x08, + ALL = 0xff +}; - //------------------------------------------------------------------------------ - //! \brief class to log from S3 plug-in - //------------------------------------------------------------------------------ - class S3Log { - public: - S3Log(XrdSysError& mErr) : mLog(&mErr), traceId(0) {} - S3Log() {} - virtual ~S3Log() {} +//------------------------------------------------------------------------------ +//! \brief class to log from S3 plug-in +//------------------------------------------------------------------------------ +class S3Log { + public: + S3Log(XrdSysError& mErr) : mLog(&mErr), traceId(0) {} + S3Log() {} + virtual ~S3Log() {} - std::string LogString(int c) { - switch (c) { + std::string LogString(int c) { + switch (c) { case DEBUG: - return "| DEBUG |"; + return "| DEBUG |"; case INFO: - return "| INFO |"; + return "| INFO |"; case WARN: - return "| REQU |"; + return "| REQU |"; case ERROR: - return "| ERROR |"; + return "| ERROR |"; default: - return "| INIT |"; - } - }; - - std::string newTrace() { - std::lock_guard guard(logMutex); - traceId++; - std::stringstream ss; - ss << "[req:" << std::setw(8) << std::setfill('0') << std::hex << traceId << "]"; - return ss.str(); + return "| INIT |"; } - - //! \brief initialize logging - void Init(XrdSysError* log) { mLog = log; } - - //! \brief log message - std::string - Log(S3::LogMask mask, const char* unit, const char* msg, ...) 
- { - std::lock_guard guard(logMutex); - va_list args; - va_start(args, msg); - vsnprintf(logBuffer, sizeof(logBuffer), msg, args); - va_end(args); - std::string tag = std::string("X") + LogString(mask) + std::string(" ") + unit; - int l = 48-tag.size(); - for ( auto i=0 ; iLog( (int) mask, tag.c_str() , logBuffer ); - return std::string(logBuffer); + std::string newTrace() { + std::lock_guard guard(logMutex); + traceId++; + std::stringstream ss; + ss << "[req:" << std::setw(8) << std::setfill('0') << std::hex << traceId + << "]"; + return ss.str(); + } + + //! \brief initialize logging + void Init(XrdSysError* log) { mLog = log; } + + //! \brief log message + std::string Log(S3::LogMask mask, const char* unit, const char* msg, ...) { + std::lock_guard guard(logMutex); + va_list args; + va_start(args, msg); + vsnprintf(logBuffer, sizeof(logBuffer), msg, args); + va_end(args); + std::string tag = + std::string("X") + LogString(mask) + std::string(" ") + unit; + int l = 48 - tag.size(); + for (auto i = 0; i < l; ++i) { + tag += " "; } - private: - XrdSysError* mLog; - char logBuffer[65535]; - std::mutex logMutex; - uint64_t traceId; - }; -} + mLog->Log((int)mask, tag.c_str(), logBuffer); + return std::string(logBuffer); + } + + private: + XrdSysError* mLog; + char logBuffer[65535]; + std::mutex logMutex; + uint64_t traceId; +}; +} // namespace S3 diff --git a/src/XrdS3/XrdS3ObjectStore.cc b/src/XrdS3/XrdS3ObjectStore.cc index 34865881a79..fe9ae9af31b 100644 --- a/src/XrdS3/XrdS3ObjectStore.cc +++ b/src/XrdS3/XrdS3ObjectStore.cc @@ -37,11 +37,12 @@ #include //------------------------------------------------------------------------------ #include + #include "XrdCks/XrdCksCalcmd5.hh" #include "XrdPosix/XrdPosixExtern.hh" +#include "XrdS3.hh" #include "XrdS3Auth.hh" #include "XrdS3Req.hh" -#include "XrdS3.hh" #include "XrdS3ScopedFsId.hh" //------------------------------------------------------------------------------ S3::S3ObjectStore::ExclusiveLocker S3::S3ObjectStore::s_exclusive_locker; @@ -53,7 +54,7 @@ namespace S3 { //! \param config Path to the configuration file //! 
\param mtpu Path to the MTPU directory //------------------------------------------------------------------------------ - S3ObjectStore::S3ObjectStore(const std::string &config, const std::string &mtpu) +S3ObjectStore::S3ObjectStore(const std::string &config, const std::string &mtpu) : config_path(config), mtpu_path(mtpu) { user_map = config_path / "users"; @@ -100,9 +101,13 @@ S3Error S3ObjectStore::SetMetadata( const std::string &object, const std::map &metadata) { for (const auto &meta : metadata) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::SetMetaData", "%s:=%s on %s", meta.first.c_str(), meta.second.c_str(), object.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::SetMetaData", + "%s:=%s on %s", meta.first.c_str(), + meta.second.c_str(), object.c_str()); if (S3Utils::SetXattr(object, meta.first, meta.second, 0)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::SetMetaData", "failed to set %s:=%s on %s", meta.first.c_str(), meta.second.c_str(), object.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::SetMetaData", "failed to set %s:=%s on %s", + meta.first.c_str(), meta.second.c_str(), object.c_str()); std::cerr << "SetMetaData failed on " << object << std::endl; return S3Error::InternalError; } @@ -117,7 +122,8 @@ S3Error S3ObjectStore::SetMetadata( //------------------------------------------------------------------------------ std::vector S3ObjectStore::GetPartsNumber( const std::string &path) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::GetPartsNumber", "%s", path.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::GetPartsNumber", "%s", + path.c_str()); auto p = S3Utils::GetXattr(path, "parts"); if (p.empty()) { @@ -130,7 +136,6 @@ std::vector S3ObjectStore::GetPartsNumber( return res; } - //------------------------------------------------------------------------------ //! SetPartsNumber Set the parts number for a file //! 
\param path Object path @@ -140,7 +145,8 @@ std::vector S3ObjectStore::GetPartsNumber( S3Error S3ObjectStore::SetPartsNumbers(const std::string &path, std::vector &parts) { auto p = S3Utils::stringJoin(',', parts); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::SetPartsNumber", "%s : %s", path.c_str(), p.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::SetPartsNumber", + "%s : %s", path.c_str(), p.c_str()); if (S3Utils::SetXattr(path.c_str(), "parts", p, 0)) { return S3Error::InternalError; @@ -158,7 +164,8 @@ S3Error S3ObjectStore::SetPartsNumbers(const std::string &path, S3Error S3ObjectStore::AddPartAttr(const std::string &object, size_t part_number) { s_exclusive_locker.lock(object); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::AddPartAttr", "%s : %u", object.c_str(), part_number); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::AddPartAttr", "%s : %u", + object.c_str(), part_number); auto parts = GetPartsNumber(object); auto n = std::to_string(part_number); @@ -179,7 +186,9 @@ S3Error S3ObjectStore::AddPartAttr(const std::string &object, //------------------------------------------------------------------------------ S3Error S3ObjectStore::CreateBucket(S3Auth &auth, S3Auth::Bucket bucket, const std::string &_location) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CreateBucket", "%s => %s", bucket.name.c_str(), _location.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CreateBucket", + "%s => %s", bucket.name.c_str(), + _location.c_str()); if (!ValidateBucketName(bucket.name)) { return S3Error::InvalidBucketName; } @@ -195,12 +204,16 @@ S3Error S3ObjectStore::CreateBucket(S3Auth &auth, S3Auth::Bucket bucket, auto userInfoBucket = user_map / bucket.owner.id / bucket.name; - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CreateBucket", "bucket-path:%s : user-info:%s", bucket.path.c_str(), userInfoBucket.c_str()); - + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CreateBucket", + "bucket-path:%s : user-info:%s", + bucket.path.c_str(), userInfoBucket.c_str()); + auto fd = XrdPosix_Open(userInfoBucket.c_str(), O_CREAT | O_EXCL | O_WRONLY, S_IRWXU | S_IRWXG); if (fd <= 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", "bucket-path:%s failed to open user-info:%s", bucket.path.c_str(), userInfoBucket.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", + "bucket-path:%s failed to open user-info:%s", + bucket.path.c_str(), userInfoBucket.c_str()); auth.DeleteBucketInfo(bucket); return S3Error::InternalError; } @@ -208,27 +221,37 @@ S3Error S3ObjectStore::CreateBucket(S3Auth &auth, S3Auth::Bucket bucket, if (S3Utils::SetXattr(userInfoBucket, "createdAt", std::to_string(std::time(nullptr)), XATTR_CREATE)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", "bucket-path:%s failed to set creation time at user-info:%s", bucket.path.c_str(), userInfoBucket.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CreateBucket", + "bucket-path:%s failed to set creation time at user-info:%s", + bucket.path.c_str(), userInfoBucket.c_str()); auth.DeleteBucketInfo(bucket); XrdPosix_Unlink(userInfoBucket.c_str()); return S3Error::InternalError; } if (XrdPosix_Mkdir((mtpu_path / bucket.name).c_str(), S_IRWXU | S_IRWXG)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", "bucket-path:%s failed to create temporary multipart upload directory %s", bucket.path.c_str(), (mtpu_path/bucket.name).c_str()); + 
S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", + "bucket-path:%s failed to create temporary " + "multipart upload directory %s", + bucket.path.c_str(), + (mtpu_path / bucket.name).c_str()); auth.DeleteBucketInfo(bucket); XrdPosix_Unlink(userInfoBucket.c_str()); return S3Error::InternalError; } - int mkdir_retc=0; + int mkdir_retc = 0; { // Create the backend directory with the users filesystem id - ScopedFsId scop (bucket.owner.uid,bucket.owner.gid); - mkdir_retc = XrdPosix_Mkdir(bucket.path.c_str(), S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); + ScopedFsId scop(bucket.owner.uid, bucket.owner.gid); + mkdir_retc = XrdPosix_Mkdir( + bucket.path.c_str(), S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); } if (mkdir_retc) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", "failed to create bucket-path:%s", bucket.path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateBucket", + "failed to create bucket-path:%s", + bucket.path.c_str()); auth.DeleteBucketInfo(bucket); XrdPosix_Unlink(userInfoBucket.c_str()); XrdPosix_Rmdir((mtpu_path / bucket.name).c_str()); @@ -256,20 +279,23 @@ std::pair BaseDir(std::string p) { //------------------------------------------------------------------------------ //! DeleteBucket - Delete a bucket and all its content -//! - we do this only it is empty and the backend bucket directory is not removed! -//! \param auth Authentication object -//! \param bucket Bucket to delete +//! - we do this only if it is empty and the backend bucket directory is not +//! removed! +//! \param auth Authentication object +//! \param bucket Bucket to delete //! \return S3Error::None if successful, S3Error::InternalError otherwise //------------------------------------------------------------------------------ S3Error S3ObjectStore::DeleteBucket(S3Auth &auth, const S3Auth::Bucket &bucket) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::DeleteBucket", "bucket-name:%s owner(%u:%u)", bucket.name.c_str(), bucket.owner.uid, bucket.owner.gid); + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::DeleteBucket", "bucket-name:%s owner(%u:%u)", + bucket.name.c_str(), bucket.owner.uid, bucket.owner.gid); { // Check the backend directory with the users filesystem id - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); if (!S3Utils::IsDirEmpty(bucket.path)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::DeleteBucket", "error bucket-name:%s is not empty!", bucket.name.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::DeleteBucket", + "error bucket-name:%s is not empty!", + bucket.name.c_str()); return S3Error::BucketNotEmpty; } } @@ -319,19 +345,23 @@ S3ObjectStore::Object::~Object() { #define XrdPosix_Listxattr listxattr //------------------------------------------------------------------------------ -//! \brief Object init +//! \brief Object init //! \param p Path to the object //!
\return S3Error::None if successful, S3Error::InternalError otherwise //------------------------------------------------------------------------------ -S3Error S3ObjectStore::Object::Init(const std::filesystem::path &p, -uid_t uid, gid_t gid) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::Object::Init", "object-path=%s owner(%u:%u)", p.c_str(), uid, gid); +S3Error S3ObjectStore::Object::Init(const std::filesystem::path &p, uid_t uid, + gid_t gid) { + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::Object::Init", + "object-path=%s owner(%u:%u)", p.c_str(), uid, + gid); struct stat buf; // Do the backend operations with the users filesystem id ScopedFsId scope(uid, gid); if (XrdPosix_Stat(p.c_str(), &buf) || S_ISDIR(buf.st_mode)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::Object::Init", "no such object - object-path=%s owner(%u:%u)", p.c_str(), uid, gid); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::Object::Init", + "no such object - object-path=%s owner(%u:%u)", + p.c_str(), uid, gid); return S3Error::NoSuchKey; } @@ -372,7 +402,9 @@ uid_t uid, gid_t gid) { //! \return Number of bytes read //------------------------------------------------------------------------------ ssize_t S3ObjectStore::Object::Read(size_t length, char **ptr) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::Object::Read", "object-path=%s owner(%u:%u) length=%u", name.c_str(), uid, gid, length); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::Object::Read", + "object-path=%s owner(%u:%u) length=%u", + name.c_str(), uid, gid, length); if (!init) { return 0; } @@ -398,7 +430,10 @@ ssize_t S3ObjectStore::Object::Read(size_t length, char **ptr) { //! \return Offset //------------------------------------------------------------------------------ off_t S3ObjectStore::Object::Lseek(off_t offset, int whence) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::Object::Seek", "object-path=%s owner(%u:%u) offset=%lu whence:%d", name.c_str(), uid, gid, offset, whence); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::Object::Seek", + "object-path=%s owner(%u:%u) offset=%lu whence:%d", name.c_str(), uid, + gid, offset, whence); if (!init) { return -1; } @@ -434,10 +469,13 @@ S3Error S3ObjectStore::DeleteObject(const S3Auth::Bucket &bucket, std::string base, obj; auto full_path = bucket.path / key; - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObject", "object-path=%s", full_path.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObject", + "object-path=%s", full_path.c_str()); if (XrdPosix_Unlink(full_path.c_str())) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::DeleteObject", "failed to delete object-path=%s", full_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::DeleteObject", + "failed to delete object-path=%s", + full_path.c_str()); return S3Error::NoSuchKey; } @@ -456,7 +494,8 @@ S3Error S3ObjectStore::DeleteObject(const S3Auth::Bucket &bucket, //------------------------------------------------------------------------------ std::vector S3ObjectStore::ListBuckets( const std::string &id) const { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListBuckets", "id:%s", id.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListBuckets", "id:%s", + id.c_str()); std::vector buckets; auto get_entry = [this, &buckets, &id](dirent *entry) { if (entry->d_name[0] == '.') { @@ -491,8 +530,11 @@ ListObjectsInfo S3ObjectStore::ListObjectVersions( const S3Auth::Bucket &bucket, 
const std::string &prefix, const std::string &key_marker, const std::string &version_id_marker, const char delimiter, int max_keys) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ListObjectVersions", "bucket:%s prefix:%s marker:%s vmarker:%s delimt=%c max-keys:%d", - bucket.name.c_str(), prefix.c_str(), key_marker.c_str(), version_id_marker.c_str(), delimiter, max_keys); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::ListObjectVersions", + "bucket:%s prefix:%s marker:%s vmarker:%s delimt=%c max-keys:%d", + bucket.name.c_str(), prefix.c_str(), key_marker.c_str(), + version_id_marker.c_str(), delimiter, max_keys); auto f = [](const std::filesystem::path &root, const std::string &object) { struct stat buf; if (!stat((root / object).c_str(), &buf)) { @@ -519,12 +561,17 @@ ListObjectsInfo S3ObjectStore::ListObjectVersions( S3Error S3ObjectStore::CopyObject(const S3Auth::Bucket &bucket, const std::string &key, Object &source_obj, const Headers &reqheaders, Headers &headers) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CopyObject", "bucket:%s key:%s src=:%s", bucket.name.c_str(), key.c_str(), source_obj.Name().c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CopyObject", + "bucket:%s key:%s src=:%s", bucket.name.c_str(), + key.c_str(), source_obj.Name().c_str()); auto final_path = bucket.path / key; struct stat buf; if (!XrdPosix_Stat(final_path.c_str(), &buf) && S_ISDIR(buf.st_mode)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CopyObject", "target:%s is directory => bucket:%s key:%s src=:%s", final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CopyObject", + "target:%s is directory => bucket:%s key:%s src=:%s", + final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); return S3Error::ObjectExistAsDir; } @@ -536,7 +583,10 @@ S3Error S3ObjectStore::CopyObject(const S3Auth::Bucket &bucket, S_IRWXU | S_IRWXG); if (err == ENOTDIR) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CopyObject", "target:%s exists already=> bucket:%s key:%s src=:%s", final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CopyObject", + "target:%s exists already=> bucket:%s key:%s src=:%s", + final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); return S3Error::ObjectExistInObjectPath; } else if (err != 0) { return S3Error::InternalError; @@ -546,7 +596,10 @@ S3Error S3ObjectStore::CopyObject(const S3Auth::Bucket &bucket, S_IRWXU | S_IRGRP); if (fd < 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CopyObject", "target:%s failed to create => bucket:%s key:%s src=:%s", final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CopyObject", + "target:%s failed to create => bucket:%s key:%s src=:%s", + final_path.c_str(), bucket.name.c_str(), source_obj.Name().c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); return S3Error::InternalError; } @@ -615,44 +668,51 @@ S3Error S3ObjectStore::CopyObject(const S3Auth::Bucket &bucket, //! - There cannot be another upload in progress of the same part.\n //------------------------------------------------------------------------------ -//! \brief KeepOptimize - Optimize the file by keeping only the parts that are needed -//! \param upload_path - The path to the file to optimize -//! 
\param part_number - The part number of the part to optimize -//! \param size - The size of the part to optimize -//! \param tmp_path - The path to the temporary file +//! \brief KeepOptimize - Optimize the file by keeping only the parts that are +//! needed +//! \param upload_path - The path to the file to optimize +//! \param part_number - The part number of the part to optimize +//! \param size - The size of the part to optimize +//! \param tmp_path - The path to the temporary file //! \param part_size - The size of the part //! \return true if the file can be optimized, false otherwise //------------------------------------------------------------------------------ bool S3ObjectStore::KeepOptimize(const std::filesystem::path &upload_path, size_t part_number, unsigned long size, - const std::string &tmp_path, - size_t part_size, std::vector& parts) { + const std::string &tmp_path, size_t part_size, + std::vector &parts) { // for the time being we disable optimized uploads - + return false; - auto p = S3Utils::stringJoin(',', parts); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", "upload-path:%s part:%u size:%lu tmp-path=%s, part-size:%u, parts:%s", - upload_path.c_str(), part_number, size, tmp_path.c_str(), part_size, p.c_str()); + auto p = S3Utils::stringJoin(',', parts); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::KeepOptimize", + "upload-path:%s part:%u size:%lu tmp-path=%s, part-size:%u, parts:%s", + upload_path.c_str(), part_number, size, tmp_path.c_str(), part_size, + p.c_str()); size_t last_part_size; try { last_part_size = std::stoul(S3Utils::GetXattr(upload_path, "last_part_size")); } catch (std::exception &) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", "disabling - last_part_size is not set"); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", + "disabling - last_part_size is not set"); return false; } if (last_part_size == 0 && part_number != 1) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", "disabling - last_part_size is 0 and part_number is not 1 !"); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::KeepOptimize", + "disabling - last_part_size is 0 and part_number is not 1 !"); return false; } - + if (part_size == 0) { S3Utils::SetXattr(upload_path, "part_size", std::to_string(size), XATTR_REPLACE); S3Utils::SetXattr(upload_path, "last_part_size", std::to_string(size), XATTR_REPLACE); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", "setting part_size:%u last_part_size:%u", size, size); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::KeepOptimize", + "setting part_size:%u last_part_size:%u", size, + size); return true; } @@ -703,7 +763,8 @@ bool S3ObjectStore::KeepOptimize(const std::filesystem::path &upload_path, S3Error ReadBufferAt(XrdS3Req &req, XrdCksCalcmd5 &md5XS, S3Crypt::S3SHA256 &sha256XS, int fd, unsigned long length) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ReadBufferAt", "fd:%d length:%lu", fd, length); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ReadBufferAt", + "fd:%d length:%lu", fd, length); int buflen = 0; unsigned long readlen = 0; char *ptr; @@ -743,7 +804,8 @@ std::pair ReadBufferIntoFile(XrdS3Req &req, S3Crypt::S3SHA256 &sha256XS, int fd, bool chunked, unsigned long size) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ReadBufferIntoFile", "fd:%d size:%lu chunked:%d", fd, size, chunked); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ReadBufferIntoFile", + "fd:%d
size:%lu chunked:%d", fd, size, chunked); #define PUT_LIMIT 5000000000 auto reader = [&req, &md5XS, &sha256XS, fd](unsigned long length) { return ReadBufferAt(req, md5XS, sha256XS, fd, length); @@ -800,16 +862,24 @@ struct FileUploadResult { //------------------------------------------------------------------------------ FileUploadResult FileUploader(XrdS3Req &req, bool chunked, size_t size, std::filesystem::path &path) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::FileUploader", "%s path:%s chunked:%d size:%lu", req.trace.c_str(), path.c_str(), chunked, size); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::FileUploader", + "%s path:%s chunked:%d size:%lu", + req.trace.c_str(), path.c_str(), chunked, size); auto fd = XrdPosix_Open(path.c_str(), O_CREAT | O_EXCL | O_WRONLY, S_IRWXU | S_IRGRP); if (fd < 0) { - if ( errno == 13) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::FileUploader", "access denied : errno:13 path:%s chunked:%d size:%lu", path.c_str(), chunked, size); + if (errno == 13) { + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::FileUploader", + "access denied : errno:13 path:%s chunked:%d size:%lu", path.c_str(), + chunked, size); return FileUploadResult{S3Error::AccessDenied, {}, {}, {}}; } else { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::FileUploader", "internal error : errno:%d path:%s chunked:%d size:%lu", errno, path.c_str(), chunked, size); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::FileUploader", + "internal error : errno:%d path:%s chunked:%d size:%lu", errno, + path.c_str(), chunked, size); return FileUploadResult{S3Error::InternalError, {}, {}, {}}; } } @@ -861,12 +931,16 @@ S3Error S3ObjectStore::UploadPartOptimized(XrdS3Req &req, const std::string &tmp_path, size_t part_size, size_t part_number, size_t size, Headers &headers) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::UploadPartOptimized", "%s tmp-path:%s part-size:%u part-number:%u size:%u", req.trace.c_str(), - tmp_path.c_str(), part_size, part_number, size); + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::UploadPartOptimized", + "%s tmp-path:%s part-size:%u part-number:%u size:%u", req.trace.c_str(), + tmp_path.c_str(), part_size, part_number, size); auto fd = XrdPosix_Open(tmp_path.c_str(), O_WRONLY); if (fd < 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPartOptimized", "failed to open tmp-path:%s errno:%d", tmp_path.c_str(), errno); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPartOptimized", + "failed to open tmp-path:%s errno:%d", + tmp_path.c_str(), errno); return S3Error::InternalError; } @@ -877,7 +951,9 @@ S3Error S3ObjectStore::UploadPartOptimized(XrdS3Req &req, long long offset = (long long)part_size * (part_number - 1); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", "tmp-path:%s computed offset:%lld", tmp_path.c_str(), offset); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", + "tmp-path:%s computed offset:%lld", + tmp_path.c_str(), offset); XrdPosix_Lseek(fd, offset, SEEK_SET); // TODO: error handling @@ -886,9 +962,11 @@ S3Error S3ObjectStore::UploadPartOptimized(XrdS3Req &req, XrdPosix_Close(fd); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", "tmp-path:%s upload complete", tmp_path.c_str(), offset); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", + "tmp-path:%s upload complete", tmp_path.c_str(), + offset); std::cerr << "finished " << std::endl; - 
+ if (error != S3Error::None) { return error; } @@ -897,7 +975,8 @@ S3Error S3ObjectStore::UploadPartOptimized(XrdS3Req &req, std::vector md5(md5f, md5f + 16); if (!req.md5.empty() && req.md5 != md5) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", "bad digest - tmp-path:%s", tmp_path.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPartOptimized", + "bad digest - tmp-path:%s", tmp_path.c_str()); return S3Error::BadDigest; } @@ -935,36 +1014,46 @@ S3Error S3ObjectStore::UploadPart(XrdS3Req &req, const std::string &upload_id, size_t part_number, unsigned long size, bool chunked, Headers &headers) { auto upload_path = mtpu_path / req.bucket / upload_id; - - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::UploadPart", "%s upload-id:%s part-number:%u size:%lu chunked:%d", req.trace.c_str(), - upload_id.c_str(), part_number, size, chunked); - + + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::UploadPart", + "%s upload-id:%s part-number:%u size:%lu chunked:%d", req.trace.c_str(), + upload_id.c_str(), part_number, size, chunked); + auto err = ValidateMultipartUpload(upload_path, req.object); if (err != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPart", "validation failed - upload-id:%s part-number:%u size:%lu chunked:%d", - upload_id.c_str(), part_number, size, chunked); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::UploadPart", + "validation failed - upload-id:%s part-number:%u size:%lu chunked:%d", + upload_id.c_str(), part_number, size, chunked); return err; } auto optimized = S3Utils::GetXattr(upload_path, "optimized"); uid_t uid; gid_t gid; - + try { uid = std::stoul(S3Utils::GetXattr(upload_path, "uid")); gid = std::stoul(S3Utils::GetXattr(upload_path, "gid")); } catch (std::exception &) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPart", "get attributes for (uid,gid) failed - upload-id:%s part-number:%u size:%lu chunked:%d", - upload_id.c_str(), part_number, size, chunked); - + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::UploadPart", + "get attributes for (uid,gid) failed - upload-id:%s part-number:%u " + "size:%lu chunked:%d", + upload_id.c_str(), part_number, size, chunked); + return S3Error::InternalError; } // Chunked uploads disables optimizations as we do not know the part size. 
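// For illustration: with a fixed part size the optimized path writes each
// part directly into a single temporary file at
//   offset = part_size * (part_number - 1)
// (see the XrdPosix_Lseek() call in UploadPartOptimized above); e.g. assuming
// part_size = 5 MiB, part 3 lands at byte 10485760. A chunked part arrives
// without a known size, so that offset cannot be computed up front: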
if (chunked && !optimized.empty()) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPart", "disabling optimization for chunked uploads - upload-id:%s part-number:%u size:%lu chunked:%d", - upload_id.c_str(), part_number, size, chunked); - + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::UploadPart", + "disabling optimization for chunked uploads - upload-id:%s " + "part-number:%u size:%lu chunked:%d", + upload_id.c_str(), part_number, size, chunked); + S3Utils::SetXattr(upload_path, "optimized", "", XATTR_REPLACE); } @@ -975,22 +1064,28 @@ S3Error S3ObjectStore::UploadPart(XrdS3Req &req, const std::string &upload_id, try { part_size = std::stoul(S3Utils::GetXattr(upload_path, "part_size")); } catch (std::exception &) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPart", "failed to get 'part_size' attribute on '%s' - upload-id:%s part-number:%u size:%lu chunked:%d", - upload_path.c_str(), upload_id.c_str(), part_number, size, chunked); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::UploadPart", + "failed to get 'part_size' attribute on '%s' - upload-id:%s " + "part-number:%u size:%lu chunked:%d", + upload_path.c_str(), upload_id.c_str(), part_number, size, chunked); return S3Error::InternalError; } { std::vector parts; parts = GetPartsNumber(upload_path); - if (KeepOptimize(upload_path, part_number, size, tmp_path, part_size, parts)) { - ScopedFsId scope(uid, gid); - return UploadPartOptimized(req, tmp_path, part_size, part_number, size, - headers); + if (KeepOptimize(upload_path, part_number, size, tmp_path, part_size, + parts)) { + ScopedFsId scope(uid, gid); + return UploadPartOptimized(req, tmp_path, part_size, part_number, size, + headers); } } - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPart", "disabling optimization - upload-id:%s part-number:%u size:%lu chunked:%d", - upload_id.c_str(), part_number, size, chunked); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPart", + "disabling optimization - upload-id:%s " + "part-number:%u size:%lu chunked:%d", + upload_id.c_str(), part_number, size, chunked); S3Utils::SetXattr(upload_path, "optimized", "", XATTR_REPLACE); } @@ -1014,14 +1109,20 @@ S3Error S3ObjectStore::UploadPart(XrdS3Req &req, const std::string &upload_id, error = SetMetadata(tmp_path, metadata); if (error != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::UploadPart", "error setting meta-data - unlinking path:%s - upload-id:%s part-number:%u size:%lu chunked:%d", - tmp_path.c_str(), upload_id.c_str(), part_number, size, chunked); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::UploadPart", + "error setting meta-data - unlinking path:%s - upload-id:%s " + "part-number:%u size:%lu chunked:%d", + tmp_path.c_str(), upload_id.c_str(), part_number, size, chunked); XrdPosix_Unlink(tmp_path.c_str()); } // TODO: handle error - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::UploadPart", "rename s:'%s'=>d:'%s' - upload-id:%s part-number:%u size:%lu chunked:%d", - tmp_path.c_str(), final_path.c_str(), upload_id.c_str(), part_number, size, chunked); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::UploadPart", + "rename s:'%s'=>d:'%s' - upload-id:%s part-number:%u size:%lu chunked:%d", + tmp_path.c_str(), final_path.c_str(), upload_id.c_str(), part_number, + size, chunked); XrdPosix_Rename(tmp_path.c_str(), final_path.c_str()); return error; } @@ -1038,14 +1139,19 @@ S3Error S3ObjectStore::UploadPart(XrdS3Req &req, const std::string &upload_id, 
S3Error S3ObjectStore::PutObject(XrdS3Req &req, const S3Auth::Bucket &bucket, unsigned long size, bool chunked, Headers &headers) { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); auto final_path = bucket.path / req.object; - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::PutObject", "%s path:%s object-path:%s owner(%u:%u), chunked:%d size:%lu", req.trace.c_str(), bucket.path.c_str(), final_path.c_str(), bucket.owner.uid, bucket.owner.gid, chunked, size); + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::PutObject", + "%s path:%s object-path:%s owner(%u:%u), chunked:%d size:%lu", + req.trace.c_str(), bucket.path.c_str(), final_path.c_str(), + bucket.owner.uid, bucket.owner.gid, chunked, size); struct stat buf; if (!XrdPosix_Stat(final_path.c_str(), &buf) && S_ISDIR(buf.st_mode)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", "path:%s is a directory", final_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", + "path:%s is a directory", final_path.c_str()); return S3Error::ObjectExistAsDir; } @@ -1057,10 +1163,14 @@ S3Error S3ObjectStore::PutObject(XrdS3Req &req, const S3Auth::Bucket &bucket, auto err = S3Utils::makePath((char *)final_path.parent_path().c_str(), S_IRWXU | S_IRGRP); if (err == ENOTDIR) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", "object exists in object path - path:%s", final_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", + "object exists in object path - path:%s", + final_path.c_str()); return S3Error::ObjectExistInObjectPath; } else if (err != 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", "internal error makeing parent : path:%s", final_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", + "internal error making parent : path:%s", + final_path.c_str()); return S3Error::InternalError; } @@ -1068,7 +1178,9 @@ S3Error S3ObjectStore::PutObject(XrdS3Req &req, const S3Auth::Bucket &bucket, FileUploader(req, chunked, size, tmp_path); if (error != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", "upload to path:%s failed - unlink path:%s", tmp_path.c_str(), tmp_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", + "upload to path:%s failed - unlink path:%s", + tmp_path.c_str(), tmp_path.c_str()); XrdPosix_Unlink(tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); return error; @@ -1100,7 +1212,10 @@ S3Error S3ObjectStore::PutObject(XrdS3Req &req, const S3Auth::Bucket &bucket, } error = SetMetadata(tmp_path, metadata); if (error != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::PutObject", "setting meta-data on path:%s failed - unlink path:%s", tmp_path.c_str(), tmp_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::PutObject", + "setting meta-data on path:%s failed - unlink path:%s", + tmp_path.c_str(), tmp_path.c_str()); XrdPosix_Unlink(tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); return error; } @@ -1125,17 +1240,26 @@ S3ObjectStore::DeleteObjects(const S3Auth::Bucket &bucket, std::vector error; for (const auto &o : objects) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::DeleteObjects", "bucket:%s object:%s",bucket.name.c_str(), o.key.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::DeleteObjects", + "bucket:%s object:%s", bucket.name.c_str(),
+ o.key.c_str()); auto err = DeleteObject(bucket, o.key); if (err == S3Error::None || err == S3Error::NoSuchKey) { if (err == S3Error::None) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObjects", "deleted bucket:%s object:%s",bucket.name.c_str(), o.key.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObjects", + "deleted bucket:%s object:%s", + bucket.name.c_str(), o.key.c_str()); } else { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObjects", "no such key - bucket:%s object:%s",bucket.name.c_str(), o.key.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::DeleteObjects", + "no such key - bucket:%s object:%s", + bucket.name.c_str(), o.key.c_str()); } deleted.push_back({o.key, o.version_id, false, ""}); } else { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::DeleteObjects", "internal error when delting bucket:%s object:%s",bucket.name.c_str(), o.key.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::DeleteObjects", + "internal error when deleting bucket:%s object:%s", + bucket.name.c_str(), o.key.c_str()); error.push_back({S3Error::InternalError, o.key, "", o.version_id}); } } @@ -1157,12 +1281,15 @@ ListObjectsInfo S3ObjectStore::ListObjectsV2( const S3Auth::Bucket &bucket, const std::string &prefix, const std::string &continuation_token, const char delimiter, int max_keys, bool fetch_owner, const std::string &start_after) { - - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListObjectsV2", "bucket:%s prefix:%s cont-token:%s delimiter:%c max-keys:%d fetch-owner:%d start-after:%s", - bucket.name.c_str(), prefix.c_str(), continuation_token.c_str(), delimiter, max_keys, fetch_owner, start_after.c_str()); - - auto f = [fetch_owner](const std::filesystem::path &root, - const std::string &object) { + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListObjectsV2", + "bucket:%s prefix:%s cont-token:%s delimiter:%c " + "max-keys:%d fetch-owner:%d start-after:%s", + bucket.name.c_str(), prefix.c_str(), + continuation_token.c_str(), delimiter, max_keys, + fetch_owner, start_after.c_str()); + + auto f = [fetch_owner](const std::filesystem::path &root, + const std::string &object) { struct stat buf; std::string owner; @@ -1197,9 +1324,11 @@ ListObjectsInfo S3ObjectStore::ListObjects(const S3Auth::Bucket &bucket, const std::string &prefix, const std::string &marker, const char delimiter, int max_keys) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListObjects", "bucket:%s prefix:%s marker:%s delimiter:%c max-keys:%d", - bucket.name.c_str(), prefix.c_str(), marker.c_str(), delimiter, max_keys); - + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::ListObjects", + "bucket:%s prefix:%s marker:%s delimiter:%c max-keys:%d", + bucket.name.c_str(), prefix.c_str(), marker.c_str(), delimiter, max_keys); + auto f = [](const std::filesystem::path &root, const std::string &object) { struct stat buf; @@ -1236,9 +1365,12 @@ ListObjectsInfo S3ObjectStore::ListObjectsCommon( const std::function &f) { std::string basedir; - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ListObjectsCommon", "bucket:%s prefix:%s marker:%s delimiter:%c max-keys:%d get-version:%d", - bucket.name.c_str(), prefix.c_str(), marker.c_str(), delimiter, max_keys, get_versions); - + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::ListObjectsCommon", + "bucket:%s prefix:%s marker:%s delimiter:%c max-keys:%d get-version:%d", + bucket.name.c_str(), prefix.c_str(), marker.c_str(), delimiter, max_keys,
get_versions); + if (prefix == "/" || max_keys == 0) { return {}; } @@ -1251,7 +1383,7 @@ ListObjectsInfo S3ObjectStore::ListObjectsCommon( auto fullpath = bucket.path; std::cerr << "list fullpath=" << fullpath << std::endl; - + struct BasicPath { std::string base; std::string name; @@ -1358,7 +1490,9 @@ ListObjectsInfo S3ObjectStore::ListObjectsCommon( //------------------------------------------------------------------------------ std::pair S3ObjectStore::CreateMultipartUpload( const S3Auth::Bucket &bucket, const std::string &key) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CreateMultipartUpload", "bucket:%s key:%s", bucket.name.c_str(), key.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CreateMultipartUpload", + "bucket:%s key:%s", bucket.name.c_str(), + key.c_str()); // TODO: Metadata uploaded with the create multipart upload operation is not // saved to the final file. @@ -1371,37 +1505,45 @@ std::pair S3ObjectStore::CreateMultipartUpload( ("." + final_path.filename().string() + "." + std::to_string(std::time(nullptr)) + std::to_string(std::rand())); - auto pp = mtpu_path / bucket.name ; + auto pp = mtpu_path / bucket.name; auto p = mtpu_path / bucket.name / upload_id; - - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CreateMultipartUpload", "bucket:%s key:%s tmp-upload-path:%s final-path:%s", - bucket.name.c_str(), key.c_str(), p.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CreateMultipartUpload", + "bucket:%s key:%s tmp-upload-path:%s final-path:%s", bucket.name.c_str(), + key.c_str(), p.c_str(), final_path.c_str()); // TODO: error handling XrdPosix_Mkdir(pp.c_str(), S_IRWXU | S_IRGRP); XrdPosix_Mkdir(p.c_str(), S_IRWXU | S_IRGRP); - + { // we have to do this as the owner of the bucket - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); auto err = S3Utils::makePath((char *)final_path.parent_path().c_str(), - S_IRWXU | S_IRGRP); - + S_IRWXU | S_IRGRP); + if (err == ENOTDIR) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateMultipartUpload", "bucket:%s key:%s object exists in path:%s", - bucket.name.c_str(), key.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CreateMultipartUpload", + "bucket:%s key:%s object exists in path:%s", bucket.name.c_str(), + key.c_str(), final_path.c_str()); return {{}, S3Error::ObjectExistInObjectPath}; } else if (err != 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateMultipartUpload", "internal error - bucket:%s key:%s tmp-upload-path:%s final-path:%s", - bucket.name.c_str(), key.c_str(), p.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CreateMultipartUpload", + "internal error - bucket:%s key:%s tmp-upload-path:%s final-path:%s", + bucket.name.c_str(), key.c_str(), p.c_str(), final_path.c_str()); return {{}, S3Error::InternalError}; } - + auto fd = XrdPosix_Open(tmp_path.c_str(), O_CREAT | O_EXCL | O_WRONLY, - S_IRWXU | S_IRGRP); - + S_IRWXU | S_IRGRP); + if (fd < 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CreateMultipartUpload", "bucket:%s key:%s failed to create tmp-upload-path:%s", bucket.name.c_str(), key.c_str(), tmp_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CreateMultipartUpload", + "bucket:%s key:%s failed to create tmp-upload-path:%s", + bucket.name.c_str(), key.c_str(), tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); 
return {{}, S3Error::InternalError}; } @@ -1412,10 +1554,15 @@ std::pair S3ObjectStore::CreateMultipartUpload( S3Utils::SetXattr(p, "tmp", tmp_path, XATTR_CREATE); S3Utils::SetXattr(p, "part_size", "0", XATTR_CREATE); S3Utils::SetXattr(p, "last_part_size", "0", XATTR_CREATE); - S3Utils::SetXattr(p, "uid" , std::to_string(bucket.owner.uid).c_str(), XATTR_CREATE); - S3Utils::SetXattr(p, "gid" , std::to_string(bucket.owner.gid).c_str(), XATTR_CREATE); - - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CreateMultipartUpload", "bucket:%s key:%s upload-id:%s", bucket.name.c_str(), key.c_str(), upload_id.c_str()); + S3Utils::SetXattr(p, "uid", std::to_string(bucket.owner.uid).c_str(), + XATTR_CREATE); + S3Utils::SetXattr(p, "gid", std::to_string(bucket.owner.gid).c_str(), + XATTR_CREATE); + + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CreateMultipartUpload", + "bucket:%s key:%s upload-id:%s", + bucket.name.c_str(), key.c_str(), + upload_id.c_str()); return {upload_id, S3Error::None}; } @@ -1428,7 +1575,9 @@ std::vector S3ObjectStore::ListMultipartUploads(const std::string &bucket) { auto upload_path = mtpu_path / bucket; - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListMultipartUpload", "bucket:%s upload-path:%s", bucket.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::ListMultipartUpload", + "bucket:%s upload-path:%s", bucket.c_str(), + upload_path.c_str()); std::vector uploads; auto parse_upload = [upload_path, &uploads](dirent *entry) { if (entry->d_name[0] == '.') { @@ -1455,7 +1604,10 @@ S3ObjectStore::ListMultipartUploads(const std::string &bucket) { S3Error S3ObjectStore::AbortMultipartUpload(const S3Auth::Bucket &bucket, const std::string &key, const std::string &upload_id) { - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::AbortMultipartUpload", "bucket:%s key:%s upload-id:%s", bucket.name.c_str(), key.c_str(), upload_id.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::AbortMultipartUpload", + "bucket:%s key:%s upload-id:%s", + bucket.name.c_str(), key.c_str(), + upload_id.c_str()); auto upload_path = mtpu_path / bucket.name / upload_id; auto tmp_path = S3Utils::GetXattr(upload_path, "tmp"); @@ -1483,7 +1635,10 @@ S3Error S3ObjectStore::DeleteMultipartUpload(const S3Auth::Bucket &bucket, const std::string &upload_id) { auto upload_path = mtpu_path / bucket.name / upload_id; - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::DeleteMultipartUpload", "bucket:%s key:%s upload-id:%s", bucket.name.c_str(), key.c_str(), upload_id.c_str()); + S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::DeleteMultipartUpload", + "bucket:%s key:%s upload-id:%s", + bucket.name.c_str(), key.c_str(), + upload_id.c_str()); auto err = ValidateMultipartUpload(upload_path, key); if (err != S3Error::None) { @@ -1514,7 +1669,9 @@ S3Error S3ObjectStore::DeleteMultipartUpload(const S3Auth::Bucket &bucket, //------------------------------------------------------------------------------ S3Error S3ObjectStore::ValidateMultipartUpload(const std::string &upload_path, const std::string &key) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ValidateMultipartUpload", "key:%s upload-path:%s",key.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::ValidateMultipartUpload", + "key:%s upload-path:%s", key.c_str(), upload_path.c_str()); struct stat buf; @@ -1524,7 +1681,10 @@ S3Error S3ObjectStore::ValidateMultipartUpload(const std::string &upload_path, auto upload_key = 
S3Utils::GetXattr(upload_path, "key"); if (upload_key != key) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::ValidateMultipartUpload", "key:%s upload-key:%s - keys do not match - invalid request",key.c_str(), upload_key.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::ValidateMultipartUpload", + "key:%s upload-key:%s - keys do not match - invalid request", + key.c_str(), upload_key.c_str()); return S3Error::InvalidRequest; } @@ -1543,12 +1703,16 @@ S3ObjectStore::ListParts(const std::string &bucket, const std::string &key, const std::string &upload_id) { auto upload_path = mtpu_path / bucket / upload_id; - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ListParts", "bucket:%s key:%s upload-id:%s upload-path:%s", - bucket.c_str(), key.c_str(), upload_id.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::ListParts", + "bucket:%s key:%s upload-id:%s upload-path:%s", + bucket.c_str(), key.c_str(), upload_id.c_str(), + upload_path.c_str()); auto err = ValidateMultipartUpload(upload_path, key); if (err != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::ListParts", "bucket:%s key:%s upload-id:%s upload-path:%s validation failed", - bucket.c_str(), key.c_str(), upload_id.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::ListParts", + "bucket:%s key:%s upload-id:%s upload-path:%s validation failed", + bucket.c_str(), key.c_str(), upload_id.c_str(), upload_path.c_str()); return {err, {}}; } @@ -1592,48 +1756,62 @@ bool S3ObjectStore::CompleteOptimizedMultipartUpload( const std::filesystem::path &final_path, const std::filesystem::path &tmp_path, const std::vector &parts) { std::string p; - for (auto i:parts) { + for (auto i : parts) { p += i.nstr(); p += ","; } p.pop_back(); - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s parts:%s", - final_path.c_str(), tmp_path.c_str(), p.c_str()); - for (auto i:parts) { + S3::S3Handler::Logger()->Log(S3::INFO, + "ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s parts:%s", + final_path.c_str(), tmp_path.c_str(), p.c_str()); + for (auto i : parts) { std::cerr << i.str() << std::endl; } - + size_t e = 1; - + for (const auto &[etag, _, n, __] : parts) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s part:%d etag:%s", - final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s part:%d etag:%s", final_path.c_str(), + tmp_path.c_str(), n, etag.c_str()); if (e != n) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s part:%d etag:%s e!=n", - final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s part:%d etag:%s e!=n", final_path.c_str(), + tmp_path.c_str(), n, etag.c_str()); return false; } e++; auto id = "part" + std::to_string(n); if (S3Utils::GetXattr(tmp_path, id + ".start").empty()) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s part:%d etag:%s '.start' attribute is empty", - final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, 
"ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s part:%d etag:%s '.start' attribute is " + "empty", + final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); return false; } - + if (S3Utils::GetXattr(tmp_path, id + ".etag") != etag) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s part:%d etag:%s '.etag' attribute is empty", - final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s part:%d etag:%s '.etag' attribute is " + "empty", + final_path.c_str(), tmp_path.c_str(), n, etag.c_str()); return false; } } // TODO: error handling XrdPosix_Rename(tmp_path.c_str(), final_path.c_str()); - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CompleteOptimizedMultipartUpload", "final-path:%s tmp-path:%s parts:%s has been successfully finalized", - final_path.c_str(), tmp_path.c_str(), p.c_str()); - + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::CompleteOptimizedMultipartUpload", + "final-path:%s tmp-path:%s parts:%s has been successfully finalized", + final_path.c_str(), tmp_path.c_str(), p.c_str()); + return true; } @@ -1651,18 +1829,25 @@ S3Error S3ObjectStore::CompleteMultipartUpload( const std::string &upload_id, const std::vector &parts) { auto upload_path = mtpu_path / req.bucket / upload_id; std::string p; - for (auto i:parts) { + for (auto i : parts) { p += i.nstr(); p += ","; } p.pop_back(); - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CompleteMultipartUpload", "%s bucket:%s key:%s upload-id:%s parts:%s upload-path:%s", - req.trace.c_str(), bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::CompleteMultipartUpload", + "%s bucket:%s key:%s upload-id:%s parts:%s upload-path:%s", + req.trace.c_str(), bucket.name.c_str(), key.c_str(), upload_id.c_str(), + p.c_str(), upload_path.c_str()); auto err = ValidateMultipartUpload(upload_path, req.object); if (err != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s didn't get validated", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s didn't get " + "validated", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str()); return err; } @@ -1670,20 +1855,25 @@ S3Error S3ObjectStore::CompleteMultipartUpload( auto opt_path = S3Utils::GetXattr(upload_path, "tmp"); auto optimized = S3Utils::GetXattr(upload_path, "optimized"); - - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str()); // Check if we are able to complete the multipart upload with only a mv // operation. if (!optimized.empty()) { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); if (!CompleteOptimizedMultipartUpload(final_path, opt_path, parts)) { return DeleteMultipartUpload(bucket, key, upload_id); } } else { // Otherwise we will need to concatenate parts - + // First check that all the parts are in order. // We first check if a file named partN exists in the multipart upload dir, // then check if it exists in the optimized upload tmp path. @@ -1691,61 +1881,99 @@ S3Error S3ObjectStore::CompleteMultipartUpload( struct stat buf; for (const auto &[etag, _, n, __] : parts) { if (n <= max) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s invalid part ordering", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - return S3Error::InvalidPartOrder; + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s invalid part ordering", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); + return S3Error::InvalidPartOrder; } max = n; - + auto path = upload_path / std::to_string(n); // TODO: error handling if (XrdPosix_Stat(path.c_str(), &buf)) { - auto id = "part" + std::to_string(n); - if (S3Utils::GetXattr(opt_path, id + ".start").empty()) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s invalid part .start attribute", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - - return S3Error::InvalidPart; - } - if (S3Utils::GetXattr(opt_path, id + ".etag") != etag) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s invalid part .etag attribute", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - - return S3Error::InvalidPart; - } + auto id = "part" + std::to_string(n); + if (S3Utils::GetXattr(opt_path, id + ".start").empty()) { + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s invalid part .start " + "attribute", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str()); + + return S3Error::InvalidPart; + } + if (S3Utils::GetXattr(opt_path, id + ".etag") != etag) { + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s invalid part .etag " + "attribute", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); + + return S3Error::InvalidPart; + } } else { - if (S3Utils::GetXattr(path, "etag") != etag) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s invalid .etag attribute", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - return S3Error::InvalidPart; - } + if (S3Utils::GetXattr(path, "etag") != etag) { + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s invalid .etag attribute", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); + return S3Error::InvalidPart; + } } } } - { // Check if the final file exists in the backend and is a directory - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); struct stat buf; if (!XrdPosix_Stat(final_path.c_str(), &buf)) { if (S_ISDIR(buf.st_mode)) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s final-path is a directory!", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s final-path is a directory!", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); return S3Error::ObjectExistAsDir; } } else { if (errno != ENOENT) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s final-path cannot be accessed!", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - return S3Error::AccessDenied; + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s final-path cannot be " + "accessed!", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str()); + return S3Error::AccessDenied; } } } - - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s copying parts to final destination via tempfile+rename ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - + + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s copying parts to final destination via " + "tempfile+rename ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); + // Then we copy all the parts into a tmp file, which will be renamed to the // final file. auto tmp_path = bucket.path / @@ -1755,34 +1983,49 @@ S3Error S3ObjectStore::CompleteMultipartUpload( int fd = 0; { // The temp file has to created using the filesystem id of the owner - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); - fd = XrdPosix_Open(tmp_path.c_str(), O_CREAT | O_EXCL | O_WRONLY, S_IRWXU | S_IRGRP); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); + fd = XrdPosix_Open(tmp_path.c_str(), O_CREAT | O_EXCL | O_WRONLY, + S_IRWXU | S_IRGRP); } if (fd < 0) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s final-path:%s failed to open tmp-path:%s!", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str(), tmp_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s final-path:%s failed to open tmp-path:%s!", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str(), tmp_path.c_str()); std::cerr << "internal error opening final file" << std::endl; return S3Error::InternalError; } - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s starting checksummming ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); - + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s starting checksummming ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str()); XrdCksCalcmd5 xs; xs.Init(); Object optimized_obj; - optimized_obj.Init(opt_path,bucket.owner.uid, bucket.owner.gid); + optimized_obj.Init(opt_path, bucket.owner.uid, bucket.owner.gid); ssize_t opt_len; try { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); opt_len = std::stol(S3Utils::GetXattr(opt_path, "part_size")); } catch (std::exception &) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s part size not set on opt-path:%s!", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str(), opt_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s part size not set on opt-path:%s!", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str(), opt_path.c_str()); opt_len = 0; } @@ -1791,10 +2034,16 @@ S3Error S3ObjectStore::CompleteMultipartUpload( for (const auto &part : parts) { Object obj; - if (obj.Init(upload_path / std::to_string(part.part_number),geteuid(), getegid()) != - S3Error::None) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s using optimized part %s ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str(), (upload_path / std::to_string(part.part_number)).c_str()); + if (obj.Init(upload_path / std::to_string(part.part_number), geteuid(), + getegid()) != S3Error::None) { + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s using optimized part %s ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str(), + (upload_path / std::to_string(part.part_number)).c_str()); // use the optimized part ssize_t start; @@ -1812,7 +2061,7 @@ S3Error S3ObjectStore::CompleteMultipartUpload( ssize_t len = opt_len; while ((i = optimized_obj.Read(len, &ptr)) > 0) { if (len < i) { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); XrdPosix_Close(fd); XrdPosix_Unlink(tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); @@ -1823,35 +2072,56 @@ S3Error S3ObjectStore::CompleteMultipartUpload( XrdPosix_Write(fd, ptr, i); } } else { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s using temporary part ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s using temporary part ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); ssize_t len = obj.GetSize(); ssize_t i = 0; while ((i = obj.Read(len, &ptr)) > 0) { if (len < i) { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); XrdPosix_Close(fd); XrdPosix_Unlink(tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s read error on temporary part ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s read error on temporary " + "part ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); return S3Error::InternalError; } len -= i; xs.Update(ptr, i); - // TODO: error handling + // TODO: error handling XrdPosix_Write(fd, ptr, i); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s writing part ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s " + "optimized:%d opt-path:%s final-path:%s writing part ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str()); } } } XrdPosix_Close(fd); - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s finalizing checksum ...", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s finalizing checksum ...", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str()); char *fxs = xs.Final(); std::vector md5(fxs, fxs + 16); auto md5hex = '"' + S3Utils::HexEncode(md5) + '"'; @@ -1860,32 +2130,47 @@ S3Error S3ObjectStore::CompleteMultipartUpload( metadata.insert({"etag", md5hex}); { - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); S3Error error = SetMetadata(tmp_path, metadata); if (error != S3Error::None) { - S3::S3Handler::Logger()->Log(S3::ERROR, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s error setting metadata on tmp-path:%s", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str(), tmp_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::ERROR, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s error setting metadata on tmp-path:%s", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 1 : 0, opt_path.c_str(), + final_path.c_str(), tmp_path.c_str()); // TODO: error handling XrdPosix_Unlink(tmp_path.c_str()); S3Utils::RmPath(final_path.parent_path(), bucket.path); return error; - } + } } { - S3::S3Handler::Logger()->Log(S3::DEBUG, "ObjectStore::CompleteMultipartUpload", "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s renaming %s => %s", - bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str(), tmp_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::DEBUG, "ObjectStore::CompleteMultipartUpload", + "bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s renaming %s => %s", + bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), + upload_path.c_str(), optimized.length() ? 
1 : 0, opt_path.c_str(), + final_path.c_str(), tmp_path.c_str(), final_path.c_str()); // Rename using the owner filesystem id - ScopedFsId scope(bucket.owner.uid,bucket.owner.gid); + ScopedFsId scope(bucket.owner.uid, bucket.owner.gid); // TODO: error handling XrdPosix_Rename(tmp_path.c_str(), final_path.c_str()); // TODO: error handling XrdPosix_Unlink(opt_path.c_str()); } - + DeleteMultipartUpload(bucket, key, upload_id); - S3::S3Handler::Logger()->Log(S3::INFO, "ObjectStore::CompleteMultipartUpload", "%s bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d opt-path:%s final-path:%s multipart upload complete!", - req.trace.c_str(), bucket.name.c_str(), key.c_str(), upload_id.c_str(), p.c_str(), upload_path.c_str(), optimized.length()?1:0, opt_path.c_str(), final_path.c_str()); + S3::S3Handler::Logger()->Log( + S3::INFO, "ObjectStore::CompleteMultipartUpload", + "%s bucket:%s key:%s upload-id:%s parts;%s upload-path:%s optimized:%d " + "opt-path:%s final-path:%s multipart upload complete!", + req.trace.c_str(), bucket.name.c_str(), key.c_str(), upload_id.c_str(), + p.c_str(), upload_path.c_str(), optimized.length() ? 1 : 0, + opt_path.c_str(), final_path.c_str()); return S3Error::None; } diff --git a/src/XrdS3/XrdS3ObjectStore.hh b/src/XrdS3/XrdS3ObjectStore.hh index 6acb7c8a753..d5966e85438 100644 --- a/src/XrdS3/XrdS3ObjectStore.hh +++ b/src/XrdS3/XrdS3ObjectStore.hh @@ -27,20 +27,20 @@ #include #include #include -#include -#include -#include #include +#include +#include +#include #include +#include #include -#include -#include +#include #include "XrdPosix/XrdPosixXrootd.hh" #include "XrdS3Auth.hh" #include "XrdS3ErrorResponse.hh" -#include "XrdS3Req.hh" #include "XrdS3Log.hh" +#include "XrdS3Req.hh" namespace S3 { @@ -106,7 +106,7 @@ class S3ObjectStore { ssize_t Read(size_t length, char **data); off_t Lseek(off_t offset, int whence); std::string Name() const { return name; } - + const std::map &GetAttributes() const { return attributes; }; @@ -126,59 +126,57 @@ class S3ObjectStore { }; class ExclusiveLocker { - public: + public: // Default constructor ExclusiveLocker() = default; // Copy constructor - ExclusiveLocker(const ExclusiveLocker& other) { - } - + ExclusiveLocker(const ExclusiveLocker &other) {} + virtual ~ExclusiveLocker() {} - + // Dummy Assignment operator - ExclusiveLocker& operator=(const ExclusiveLocker& other) { - return *this; - } - + ExclusiveLocker &operator=(const ExclusiveLocker &other) { return *this; } + // Function to acquire a lock for a given name - void lock(const std::string& name) { + void lock(const std::string &name) { std::unique_lock map_lock(map_mutex_); std::shared_ptr mutex = getOrCreateMutex(name); - map_lock.unlock(); // Release the map lock before acquiring the object-specific lock + map_lock.unlock(); // Release the map lock before acquiring the + // object-specific lock mutex->lock(); } - + // Function to release a lock for a given name - void unlock(const std::string& name) { + void unlock(const std::string &name) { std::unique_lock map_lock(map_mutex_); auto it = mutex_map_.find(name); if (it != mutex_map_.end()) { - it->second->unlock(); - if (it->second.use_count() == 1) { - // If this was the last reference, remove the entry from the map - mutex_map_.erase(it); - } + it->second->unlock(); + if (it->second.use_count() == 1) { + // If this was the last reference, remove the entry from the map + mutex_map_.erase(it); + } } } - - private: + + private: std::unordered_map> mutex_map_; std::mutex map_mutex_; - + // Function to get 
or create a mutex for a given name - std::shared_ptr<std::mutex> getOrCreateMutex(const std::string& name) { + std::shared_ptr<std::mutex> getOrCreateMutex(const std::string &name) { auto it = mutex_map_.find(name); if (it == mutex_map_.end()) { - auto mutex = std::make_shared<std::mutex>(); - mutex_map_[name] = mutex; - return mutex; + auto mutex = std::make_shared<std::mutex>(); + mutex_map_[name] = mutex; + return mutex; } else { - return it->second; + return it->second; } } }; - + S3Error CreateBucket(S3Auth &auth, S3Auth::Bucket bucket, const std::string &_location); S3Error DeleteBucket(S3Auth &auth, const S3Auth::Bucket &bucket); @@ -241,11 +239,11 @@ class S3ObjectStore { size_t part_number; size_t size; std::string str() { - return "# " + std::to_string(part_number) + " size: " + std::to_string( size ) + " etag: " +etag + " modified: " + std::to_string(last_modified); - } - std::string nstr() { - return std::to_string(part_number); + return "# " + std::to_string(part_number) + + " size: " + std::to_string(size) + " etag: " + etag + + " modified: " + std::to_string(last_modified); } + std::string nstr() { return std::to_string(part_number); } }; std::vector ListMultipartUploads( @@ -270,14 +268,13 @@ class S3ObjectStore { static ExclusiveLocker s_exclusive_locker; -private: + private: static bool ValidateBucketName(const std::string &name); std::filesystem::path config_path; std::filesystem::path user_map; std::filesystem::path mtpu_path; - ListObjectsInfo ListObjectsCommon( const S3Auth::Bucket &bucket, std::string prefix, const std::string &marker, char delimiter, int max_keys, @@ -300,7 +297,8 @@ private: bool KeepOptimize(const std::filesystem::path &upload_path, size_t part_number, unsigned long size, - const std::string &tmp_path, size_t part_size, std::vector &parts); + const std::string &tmp_path, size_t part_size, + std::vector &parts); [[nodiscard]] static S3Error ValidateMultipartUpload( const std::string &upload_path, const std::string &key); S3Error DeleteMultipartUpload(const S3Auth::Bucket &bucket, @@ -314,4 +312,3 @@ private: }; } // namespace S3 -
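Note on the ExclusiveLocker class above: every lock(name) must be paired with an unlock(name) on all exit paths, otherwise the per-name mutex entry is never erased from mutex_map_. A small RAII guard keeps that pairing exception-safe; the sketch below is illustrative only (ScopedNameLock is not part of this patch and assumes ExclusiveLocker remains publicly accessible):

    #include <string>
    #include <utility>

    class ScopedNameLock {
     public:
      ScopedNameLock(S3::S3ObjectStore::ExclusiveLocker &locker, std::string name)
          : locker_(locker), name_(std::move(name)) {
        locker_.lock(name_);  // blocks until no other holder of the same name remains
      }
      ~ScopedNameLock() { locker_.unlock(name_); }  // runs on every exit path
     private:
      S3::S3ObjectStore::ExclusiveLocker &locker_;
      std::string name_;
    };

A caller would then write ScopedNameLock guard(S3ObjectStore::s_exclusive_locker, upload_id); at the top of the critical section instead of calling lock()/unlock() manually.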
diff --git a/src/XrdS3/XrdS3Req.cc index f018f44945d..ed5cddad8b4 100644 --- a/src/XrdS3/XrdS3Req.cc +++ b/src/XrdS3/XrdS3Req.cc @@ -290,7 +290,8 @@ std::string MergeHeaders(const std::map<std::string, std::string> &headers) { return res.substr(0, res.empty() ? 0 : res.length() - 2); } -int XrdS3Req::S3Response(int code, const std::map<std::string, std::string> &headers, +int XrdS3Req::S3Response(int code, + const std::map<std::string, std::string> &headers, const std::string &body) { std::string headers_str = MergeHeaders(headers); @@ -298,7 +299,8 @@ int XrdS3Req::S3Response(int code, const std::map<std::string, std::string> &hea body.size()); } -int XrdS3Req::S3Response(int code, const std::map<std::string, std::string> &headers, +int XrdS3Req::S3Response(int code, + const std::map<std::string, std::string> &headers, const char *body, long long size) { std::string headers_str = MergeHeaders(headers); diff --git a/src/XrdS3/XrdS3Req.hh index db4031ecb64..f34f0333634 100644 --- a/src/XrdS3/XrdS3Req.hh +++ b/src/XrdS3/XrdS3Req.hh @@ -119,4 +119,3 @@ class XrdS3Req : protected XrdHttpExtReq { using HandlerFunc = std::function<int(XrdS3Req &)>; } // namespace S3 - diff --git a/src/XrdS3/XrdS3Response.cc index afb6bf2caab..e8140df3231 100644 --- a/src/XrdS3/XrdS3Response.cc +++ b/src/XrdS3/XrdS3Response.cc @@ -1,6 +1,26 @@ +//------------------------------------------------------------------------------ +// Copyright (c) 2024 by European Organization for Nuclear Research (CERN) +// Author: Mano Segransan / CERN EOS Project +//------------------------------------------------------------------------------ +// This file is part of the XRootD software suite. +// +// XRootD is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. // -// Created by segransm on 11/16/23. +// XRootD is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. // +// You should have received a copy of the GNU Lesser General Public License +// along with XRootD. If not, see <http://www.gnu.org/licenses/>. +// +// In applying this licence, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. +//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ #include "XrdS3Response.hh" @@ -165,8 +185,8 @@ int ListObjectsV2Response(XrdS3Req& req, const std::string& bucket, printer.AddElement("Prefix", encoder(prefix)); - printer.AddElement("KeyCount", - (int64_t)(oinfo.objects.size() + oinfo.common_prefixes.size())); + printer.AddElement("KeyCount", (int64_t)(oinfo.objects.size() + + oinfo.common_prefixes.size())); printer.AddElement("IsTruncated", oinfo.is_truncated); if (oinfo.is_truncated) { printer.AddElement("NextContinuationToken", encoder(oinfo.key_marker));
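For orientation, the two S3Response overloads above take the reply headers as a std::map<std::string, std::string> together with either a std::string body or a raw buffer plus an explicit size. A hypothetical call site (the header values are illustrative, not from this patch):

    std::map<std::string, std::string> headers = {
        {"Content-Type", "application/xml"},
        {"x-amz-request-id", req.id}};
    return req.S3Response(200, headers, response_xml);  // response_xml: std::string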
diff --git a/src/XrdS3/XrdS3Router.cc index 928ad0ae93b..6aa65541f20 100644 --- a/src/XrdS3/XrdS3Router.cc +++ b/src/XrdS3/XrdS3Router.cc @@ -24,6 +24,7 @@ //------------------------------------------------------------------------------ #include "XrdS3Router.hh" + #include "XrdS3.hh" //------------------------------------------------------------------------------ #include @@ -154,7 +155,8 @@ bool S3Route::MatchMap( //! \param route the route //------------------------------------------------------------------------------ void S3Router::AddRoute(S3Route &route) { - S3::S3Handler::Logger()->Log(S3::ALL, "Router", "registered route: %s", route.GetName().c_str()); + S3::S3Handler::Logger()->Log(S3::ALL, "Router", "registered route: %s", + route.GetName().c_str()); routes.push_back(route); } @@ -166,7 +168,9 @@ void S3Router::AddRoute(S3Route &route) { int S3Router::ProcessReq(XrdS3Req &req) { for (const auto &route : routes) { if (route.Match(req)) { - S3::S3Handler::Logger()->Log(S3::DEBUG, "Router", "found matching route for req: %s", route.GetName().c_str()); + S3::S3Handler::Logger()->Log(S3::DEBUG, "Router", + "found matching route for req: %s", + route.GetName().c_str()); auto start = std::chrono::high_resolution_clock::now(); int rc = route.Handler()(req); auto end = std::chrono::high_resolution_clock::now(); @@ -174,11 +178,17 @@ int S3Router::ProcessReq(XrdS3Req &req) { double seconds = duration.count(); std::ostringstream oss; oss << std::fixed << std::setprecision(3) << seconds; - S3::S3Handler::Logger()->Log(S3::WARN, "Router", "%s [t=%s] [id=%s] [bucket=%s] [object=%s] [v=%s] retc=%d", req.trace.c_str(), oss.str().c_str(), req.id.c_str(), req.bucket.c_str(), req.object.c_str(), req.Verb().c_str(), rc); + S3::S3Handler::Logger()->Log( + S3::WARN, "Router", + "%s [t=%s] [id=%s] [bucket=%s] [object=%s] [v=%s] retc=%d", + req.trace.c_str(), oss.str().c_str(), req.id.c_str(), + req.bucket.c_str(), req.object.c_str(), req.Verb().c_str(), rc); return rc; } } - S3::S3Handler::Logger()->Log(S3::ERROR, "Router", "unable to find matching route for req: %s.", req.uri_path.c_str()); + S3::S3Handler::Logger()->Log(S3::ERROR, "Router", + "unable to find matching route for req: %s.", + req.uri_path.c_str()); return not_found_handler(req); } diff --git a/src/XrdS3/XrdS3Router.hh index 658048df6af..c45de4c95db 100644 --- a/src/XrdS3/XrdS3Router.hh +++ b/src/XrdS3/XrdS3Router.hh @@ -91,8 +91,7 @@ class S3Route { //------------------------------------------------------------------------------ class S3Router { public: - explicit S3Router(HandlerFunc fn) - : not_found_handler(std::move(fn)){}; + explicit S3Router(HandlerFunc fn) : not_found_handler(std::move(fn)){}; ~S3Router() = default; @@ -107,5 +106,3 @@ class S3Router { }; } // namespace S3 - - diff --git a/src/XrdS3/XrdS3ScopedFsId.hh index f57fcb742c2..90d93465ae4 100644 --- a/src/XrdS3/XrdS3ScopedFsId.hh +++ b/src/XrdS3/XrdS3ScopedFsId.hh @@ -1,6 +1,7 @@ //------------------------------------------------------------------------------ // Copyright (c) 2024 by European Organization for Nuclear Research (CERN) -// Author: Andreas-Joachim Peters / CERN EOS Project +// Author: Andreas-Joachim Peters / CERN EOS Project + // +//------------------------------------------------------------------------------ // This file is part of the XRootD software suite. // @@ -35,15 +36,12 @@ namespace S3 { //! process to the given values during the lifetime of the object. //! On destruction the fsuid and fsgid are restored to their original values. //------------------------------------------------------------------------------ -class ScopedFsId -{ -public: +class ScopedFsId { + public: //---------------------------------------------------------------------------- //! 
Constructor //---------------------------------------------------------------------------- - ScopedFsId(uid_t fsuid_, gid_t fsgid_) - : fsuid(fsuid_), fsgid(fsgid_) - { + ScopedFsId(uid_t fsuid_, gid_t fsgid_) : fsuid(fsuid_), fsgid(fsgid_) { ok = true; prevFsuid = -1; prevFsgid = -1; @@ -55,7 +53,8 @@ public: prevFsuid = setfsuid(fsuid); if (setfsuid(fsuid) != fsuid) { - std::cerr << "Error: Unable to set fsuid to " << fsuid << "." <= 0) { setfsuid(prevFsuid); } @@ -89,18 +88,17 @@ public: } } - bool IsOk() const - { - return ok; - } + bool IsOk() const { return ok; } static void Validate() { - ScopedFsId scope(geteuid()+1, geteuid()+1); + ScopedFsId scope(geteuid() + 1, geteuid() + 1); if (!scope.IsOk()) { - throw std::runtime_error("XrdS3 misses the capability to set the filesystem IDs on the fly!"); + throw std::runtime_error( + "XrdS3 misses the capability to set the filesystem IDs on the fly!"); } } -private: + + private: int fsuid; int fsgid; @@ -110,26 +108,24 @@ private: bool ok; }; -} // namespace S3 +} // namespace S3 #else // Dummy implementation for non-linux platforms -class ScopedFsId -{ -public: +class ScopedFsId { + public: //---------------------------------------------------------------------------- //! Constructor //---------------------------------------------------------------------------- - ScopedFsId(uid_t fsuid_, gid_t fsgid_) - : fsuid(fsuid_), fsgid(fsgid_) {} + ScopedFsId(uid_t fsuid_, gid_t fsgid_) : fsuid(fsuid_), fsgid(fsgid_) {} //---------------------------------------------------------------------------- //! Destructor //---------------------------------------------------------------------------- ~ScopedFsId() {} - bool IsOk() const { return true;} + bool IsOk() const { return true; } }; -} // namespace S3 +} // namespace S3 #endif diff --git a/src/XrdS3/XrdS3Utils.cc b/src/XrdS3/XrdS3Utils.cc index a4dd2cde8d7..aca49c85324 100644 --- a/src/XrdS3/XrdS3Utils.cc +++ b/src/XrdS3/XrdS3Utils.cc @@ -27,10 +27,11 @@ //------------------------------------------------------------------------------ #include #include + #include #include -#include #include +#include //------------------------------------------------------------------------------ #include "XrdPosix/XrdPosixExtern.hh" //------------------------------------------------------------------------------ @@ -291,7 +292,7 @@ int S3Utils::makePath(char *path, mode_t mode) { //! @param stop The path to stop at //------------------------------------------------------------------------------ void S3Utils::RmPath(std::filesystem::path path, - const std::filesystem::path &stop) { + const std::filesystem::path &stop) { while (path != stop && !XrdPosix_Rmdir(path.c_str())) { path = path.parent_path(); } @@ -317,7 +318,7 @@ std::string S3Utils::GetXattr(const std::filesystem::path &path, if (ret == -1) { return {}; } - + // Add a terminating '\0' res.resize(ret + 1); XrdPosix_Getxattr(path.c_str(), ("user.s3." 
+ key).c_str(), res.data(), @@ -327,6 +328,7 @@ std::string S3Utils::GetXattr(const std::filesystem::path &path, } #include + #include "XrdS3XAttr.hh" #define XrdPosix_Setxattr setxattr // TODO: Replace by XrdPosix_Setxattr once implemented @@ -347,7 +349,7 @@ int S3Utils::SetXattr(const std::filesystem::path &path, const std::string &key, //------------------------------------------------------------------------------ bool S3Utils::IsDirEmpty(const std::filesystem::path &path) { auto dir = XrdPosix_Opendir(path.c_str()); - + if (dir == nullptr) { return false; } diff --git a/src/XrdS3/XrdS3Utils.hh b/src/XrdS3/XrdS3Utils.hh index 8c2eaaa14cd..132ec6fc462 100644 --- a/src/XrdS3/XrdS3Utils.hh +++ b/src/XrdS3/XrdS3Utils.hh @@ -26,6 +26,8 @@ //------------------------------------------------------------------------------ #include +#include + #include #include #include @@ -34,7 +36,6 @@ #include #include #include -#include //------------------------------------------------------------------------------ namespace S3 { @@ -152,4 +153,3 @@ class S3Utils { }; } // namespace S3 - diff --git a/src/XrdS3/XrdS3XAttr.hh b/src/XrdS3/XrdS3XAttr.hh index 0a742b1f378..cbfe4795d32 100644 --- a/src/XrdS3/XrdS3XAttr.hh +++ b/src/XrdS3/XrdS3XAttr.hh @@ -1,6 +1,7 @@ //------------------------------------------------------------------------------ // Copyright (c) 2024 by European Organization for Nuclear Research (CERN) -// Author: Andreas-Joachim Peters / CERN EOS Project +// Author: Andreas-Joachim Peters / CERN EOS Project +// //------------------------------------------------------------------------------ // This file is part of the XRootD software suite. // @@ -27,39 +28,31 @@ #ifdef __APPLE__ // Macros to translate Linux xattr function names to macOS equivalents #define getxattr(path, name, value, size) \ - getxattr(path, name, value, size, 0, 0) + getxattr(path, name, value, size, 0, 0) #define lgetxattr(path, name, value, size) \ - getxattr(path, name, value, size, 0, XATTR_NOFOLLOW) + getxattr(path, name, value, size, 0, XATTR_NOFOLLOW) -#define fgetxattr(fd, name, value, size) \ - fgetxattr(fd, name, value, size) +#define fgetxattr(fd, name, value, size) fgetxattr(fd, name, value, size) #define setxattr(path, name, value, size, flags) \ - setxattr(path, name, value, size, 0, flags) + setxattr(path, name, value, size, 0, flags) #define lsetxattr(path, name, value, size, flags) \ - setxattr(path, name, value, size, 0, XATTR_NOFOLLOW) + setxattr(path, name, value, size, 0, XATTR_NOFOLLOW) #define fsetxattr(fd, name, value, size, flags) \ - fsetxattr(fd, name, value, size, flags) + fsetxattr(fd, name, value, size, flags) -#define removexattr(path, name) \ - removexattr(path, name, 0) +#define removexattr(path, name) removexattr(path, name, 0) -#define lremovexattr(path, name) \ - removexattr(path, name, XATTR_NOFOLLOW) +#define lremovexattr(path, name) removexattr(path, name, XATTR_NOFOLLOW) -#define fremovexattr(fd, name) \ - fremovexattr(fd, name) +#define fremovexattr(fd, name) fremovexattr(fd, name) -#define listxattr(path, list, size) \ - listxattr(path, list, size, 0) +#define listxattr(path, list, size) listxattr(path, list, size, 0) -#define llistxattr(path, list, size) \ - listxattr(path, list, size, XATTR_NOFOLLOW) +#define llistxattr(path, list, size) listxattr(path, list, size, XATTR_NOFOLLOW) -#define flistxattr(fd, list, size) \ - flistxattr(fd, list, size) +#define flistxattr(fd, list, size) flistxattr(fd, list, size) #endif - diff --git a/src/XrdS3/XrdS3Xml.hh b/src/XrdS3/XrdS3Xml.hh index 
d2d461820b5..c98fcd8b381 100644 --- a/src/XrdS3/XrdS3Xml.hh +++ b/src/XrdS3/XrdS3Xml.hh @@ -26,6 +26,7 @@ //------------------------------------------------------------------------------ #include + #include //------------------------------------------------------------------------------ @@ -50,4 +51,3 @@ class S3Xml : public tinyxml2::XMLPrinter { }; } // namespace S3 -
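A pattern worth noting throughout the object-store changes above: every filesystem call that touches bucket content runs inside a ScopedFsId block, so it executes with the bucket owner's fsuid/fsgid, and the previous ids are restored when the scope ends. A minimal sketch of the idiom (the stat call stands in for any XrdPosix operation):

    {
      S3::ScopedFsId scope(bucket.owner.uid, bucket.owner.gid);  // switch ids
      struct stat buf;
      if (!XrdPosix_Stat(final_path.c_str(), &buf) && S_ISDIR(buf.st_mode)) {
        // ... handle "object exists as directory" as the bucket owner ...
      }
    }  // destructor restores the previous fsuid/fsgid

On non-Linux builds the dummy ScopedFsId turns this into a no-op, while on Linux ScopedFsId::Validate() makes the plug-in fail fast at startup when the process lacks the capability to switch filesystem ids on the fly.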