diff --git a/arangod/VocBase/collection.cpp b/arangod/VocBase/collection.cpp index 82f5bfe223..d607ef18a0 100644 --- a/arangod/VocBase/collection.cpp +++ b/arangod/VocBase/collection.cpp @@ -23,28 +23,28 @@ #include "collection.h" -#include +#include +#include -#include "Basics/conversions.h" -#include "Basics/files.h" #include "Basics/FileUtils.h" -#include "Basics/hashes.h" -#include "Basics/json.h" #include "Basics/JsonHelper.h" #include "Basics/Logger.h" +#include "Basics/StringUtils.h" +#include "Basics/VelocyPackHelper.h" +#include "Basics/conversions.h" +#include "Basics/files.h" +#include "Basics/hashes.h" +#include "Basics/json.h" +#include "Basics/memory-map.h" #include "Basics/random.h" #include "Basics/tri-strings.h" -#include "Basics/memory-map.h" -#include "Basics/VelocyPackHelper.h" #include "Cluster/ClusterInfo.h" #include "VocBase/document-collection.h" #include "VocBase/server.h" #include "VocBase/vocbase.h" -#include -#include - using namespace arangodb; +using namespace arangodb::basics; //////////////////////////////////////////////////////////////////////////////// /// @brief extract the numeric part from a filename @@ -136,7 +136,8 @@ static void SortDatafiles(TRI_vector_pointer_t* files) { //////////////////////////////////////////////////////////////////////////////// static void InitCollection(TRI_vocbase_t* vocbase, TRI_collection_t* collection, - std::string const& directory, VocbaseCollectionInfo const& info) { + std::string const& directory, + VocbaseCollectionInfo const& info) { TRI_ASSERT(collection != nullptr); collection->_info.update(info); @@ -145,7 +146,8 @@ static void InitCollection(TRI_vocbase_t* vocbase, TRI_collection_t* collection, collection->_tickMax = 0; collection->_state = TRI_COL_STATE_WRITE; collection->_lastError = 0; - collection->_directory = TRI_DuplicateString(TRI_UNKNOWN_MEM_ZONE, directory.c_str(), directory.size()); + collection->_directory = TRI_DuplicateString( + TRI_UNKNOWN_MEM_ZONE, directory.c_str(), 
directory.size()); TRI_InitVectorPointer(&collection->_datafiles, TRI_UNKNOWN_MEM_ZONE); TRI_InitVectorPointer(&collection->_journals, TRI_UNKNOWN_MEM_ZONE); @@ -158,147 +160,144 @@ static void InitCollection(TRI_vocbase_t* vocbase, TRI_collection_t* collection, //////////////////////////////////////////////////////////////////////////////// static TRI_col_file_structure_t ScanCollectionDirectory(char const* path) { + LOG_TOPIC(TRACE, Logger::DATAFILES) << "scanning collection directory '" + << path << "'"; + TRI_col_file_structure_t structure; - regex_t re; TRI_InitVectorString(&structure._journals, TRI_CORE_MEM_ZONE); TRI_InitVectorString(&structure._compactors, TRI_CORE_MEM_ZONE); TRI_InitVectorString(&structure._datafiles, TRI_CORE_MEM_ZONE); TRI_InitVectorString(&structure._indexes, TRI_CORE_MEM_ZONE); - if (regcomp(&re, - "^(temp|compaction|journal|datafile|index|compactor)-([0-9][0-9]*" - ")\\.(db|json)(\\.dead)?$", - REG_EXTENDED) != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return structure; - } - // check files within the directory std::vector files = TRI_FilesDirectory(path); for (auto const& file : files) { - regmatch_t matches[5]; + std::vector parts = StringUtils::split(file, '.'); - if (regexec(&re, file.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) == 0) { - // file type: (journal|datafile|index|compactor) - char const* first = file.c_str() + matches[1].rm_so; - size_t firstLen = matches[1].rm_eo - matches[1].rm_so; + if (parts.size() < 2 || parts.size() > 3 || parts[0].empty()) { + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; + continue; + } - // extension - char const* third = file.c_str() + matches[3].rm_so; - size_t thirdLen = matches[3].rm_eo - matches[3].rm_so; + std::string filename = FileUtils::buildFilename(path, file); + std::string extension = parts[1]; + std::string isDead = (parts.size() > 2) ? parts[2] : ""; - // isdead? 
- size_t fourthLen = matches[4].rm_eo - matches[4].rm_so; + std::vector next = StringUtils::split(parts[0], "-"); - // ............................................................................. - // file is dead - // ............................................................................. + if (next.size() < 2) { + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; + continue; + } - if (fourthLen > 0) { - char* filename; + std::string filetype = next[0]; + next.erase(next.begin()); + std::string qualifier = StringUtils::join(next, '-'); - filename = TRI_Concatenate2File(path, file.c_str()); + // ............................................................................. + // file is dead + // ............................................................................. - if (filename != nullptr) { - LOG(TRACE) << "removing .dead file '" << filename << "'"; - TRI_UnlinkFile(filename); - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - } - } - - // ............................................................................. - // file is an index - // ............................................................................. - - else if (TRI_EqualString("index", first, firstLen) && - TRI_EqualString("json", third, thirdLen)) { - char* filename; - - filename = TRI_Concatenate2File(path, file.c_str()); - TRI_PushBackVectorString(&structure._indexes, filename); - } - - // ............................................................................. - // file is a journal or datafile - // ............................................................................. 
- - else if (TRI_EqualString("db", third, thirdLen)) { - std::string filename = TRI_Concatenate2File(path, file.c_str()); - - // file is a journal - if (TRI_EqualString("journal", first, firstLen)) { - TRI_PushBackVectorString(&structure._journals, - TRI_DuplicateString(filename.c_str())); - } - - // file is a datafile - else if (TRI_EqualString("datafile", first, firstLen)) { - TRI_PushBackVectorString(&structure._datafiles, - TRI_DuplicateString(filename.c_str())); - } - - // file is a left-over compaction file. rename it back - else if (TRI_EqualString("compaction", first, firstLen)) { - char* relName; - char* newName; - - relName = TRI_Concatenate2String( - "datafile-", file.c_str() + strlen("compaction-")); - newName = TRI_Concatenate2File(path, relName); - TRI_FreeString(TRI_CORE_MEM_ZONE, relName); - - if (TRI_ExistsFile(newName)) { - // we have a compaction-xxxx and a datafile-xxxx file. we'll keep - // the datafile - TRI_UnlinkFile(filename.c_str()); - - LOG(WARN) << "removing left-over compaction file '" << filename.c_str() << "'"; - - TRI_FreeString(TRI_CORE_MEM_ZONE, newName); - continue; - } else { - int res; - - // this should fail, but shouldn't do any harm either... - TRI_UnlinkFile(newName); - - // rename the compactor to a datafile - res = TRI_RenameFile(filename.c_str(), newName); - - if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "unable to rename compaction file '" << filename.c_str() << "'"; - - TRI_FreeString(TRI_CORE_MEM_ZONE, newName); - - continue; - } - } - - TRI_PushBackVectorString(&structure._datafiles, newName); - } - - // temporary file, we can delete it! - else if (TRI_EqualString("temp", first, firstLen)) { - LOG(WARN) << "found temporary file '" << filename.c_str() << "', which is probably a left-over. 
deleting it"; - TRI_UnlinkFile(filename.c_str()); - } - - // ups, what kind of file is that - else { - LOG(ERR) << "unknown datafile type '" << file.c_str() << "'"; - } + if (!isDead.empty()) { + if (isDead == "dead") { + FileUtils::remove(filename); } else { - LOG(ERR) << "unknown datafile type '" << file.c_str() << "'"; + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; + } + + continue; + } + + // ............................................................................. + // file is an index + // ............................................................................. + + if (filetype == "index" && extension == "json") { + TRI_PushBackVectorString( + &structure._indexes, + TRI_DuplicateString(filename.c_str(), filename.size())); + continue; + } + + // ............................................................................. + // file is a journal or datafile + // ............................................................................. + + if (extension == "db") { + // file is a journal + if (filetype == "journal") { + TRI_PushBackVectorString(&structure._journals, + TRI_DuplicateString(filename.c_str())); + } + + // file is a datafile + else if (filetype == "datafile") { + TRI_PushBackVectorString(&structure._datafiles, + TRI_DuplicateString(filename.c_str())); + } + + // file is a left-over compaction file. rename it back + else if (filetype == "compaction") { + std::string relName = "datafile-" + qualifier + "." + extension; + std::string newName = FileUtils::buildFilename(path, relName); + + if (FileUtils::exists(newName)) { + // we have a compaction-xxxx and a datafile-xxxx file. we'll keep + // the datafile + + FileUtils::remove(filename); + + LOG_TOPIC(WARN, Logger::DATAFILES) + << "removing left-over compaction file '" << filename.c_str() + << "'"; + + continue; + } else { + int res; + + // this should fail, but shouldn't do any harm either... 
+ FileUtils::remove(newName); + + // rename the compactor to a datafile + res = TRI_RenameFile(filename.c_str(), newName.c_str()); + + if (res != TRI_ERROR_NO_ERROR) { + LOG_TOPIC(ERR, Logger::DATAFILES) + << "unable to rename compaction file '" << filename.c_str() + << "'"; + + continue; + } + } + + TRI_PushBackVectorString(&structure._datafiles, + TRI_DuplicateString(newName.c_str())); + } + + // temporary file, we can delete it! + else if (filetype == "temp") { + LOG_TOPIC(WARN, Logger::DATAFILES) + << "found temporary file '" << filename.c_str() + << "', which is probably a left-over. deleting it"; + TRI_UnlinkFile(filename.c_str()); + } + + // ups, what kind of file is that + else { + LOG_TOPIC(ERR, Logger::DATAFILES) << "unknown datafile type '" + << file.c_str() << "'"; } } } - regfree(&re); - // now sort the files in the structures that we created. // the sorting allows us to iterate the files in the correct order SortFilenames(&structure._journals); @@ -314,25 +313,16 @@ static TRI_col_file_structure_t ScanCollectionDirectory(char const* path) { //////////////////////////////////////////////////////////////////////////////// static bool CheckCollection(TRI_collection_t* collection, bool ignoreErrors) { + LOG_TOPIC(TRACE, Logger::DATAFILES) << "check collection directory '" + << collection->_directory << "'"; + TRI_datafile_t* datafile; TRI_vector_pointer_t all; TRI_vector_pointer_t compactors; TRI_vector_pointer_t datafiles; TRI_vector_pointer_t journals; TRI_vector_pointer_t sealed; - bool stop; - regex_t re; - - if (regcomp(&re, - "^(temp|compaction|journal|datafile|index|compactor)-([0-9][0-9]*" - ")\\.(db|json)(\\.dead)?$", - REG_EXTENDED) != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return false; - } - - stop = false; + bool stop = false; // check files within the directory std::vector files = TRI_FilesDirectory(collection->_directory); @@ -344,179 +334,191 @@ static bool CheckCollection(TRI_collection_t* collection, bool 
ignoreErrors) { TRI_InitVectorPointer(&all, TRI_UNKNOWN_MEM_ZONE); for (auto const& file : files) { - regmatch_t matches[5]; + std::vector parts = StringUtils::split(file, '.'); - if (regexec(&re, file.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) == 0) { - char const* first = file.c_str() + matches[1].rm_so; - size_t firstLen = matches[1].rm_eo - matches[1].rm_so; + if (parts.size() < 2 || parts.size() > 3 || parts[0].empty()) { + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; + continue; + } - char const* third = file.c_str() + matches[3].rm_so; - size_t thirdLen = matches[3].rm_eo - matches[3].rm_so; + std::string extension = parts[1]; + std::string isDead = (parts.size() > 2) ? parts[2] : ""; - size_t fourthLen = matches[4].rm_eo - matches[4].rm_so; + std::vector next = StringUtils::split(parts[0], "-"); - // check for temporary & dead files + if (next.size() < 2) { + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; + continue; + } - if (fourthLen > 0 || TRI_EqualString("temp", first, firstLen)) { - // found a temporary file. we can delete it! - char* filename; + std::string filename = + FileUtils::buildFilename(collection->_directory, file); + std::string filetype = next[0]; + next.erase(next.begin()); + std::string qualifier = StringUtils::join(next, '-'); - filename = TRI_Concatenate2File(collection->_directory, file.c_str()); + // ............................................................................. + // file is dead + // ............................................................................. - LOG(TRACE) << "found temporary file '" << filename << "', which is probably a left-over. 
deleting it"; - TRI_UnlinkFile(filename); - TRI_Free(TRI_CORE_MEM_ZONE, filename); + if (!isDead.empty()) { + if (isDead == "dead") { + if (filetype == "temp") { + FileUtils::remove(filename); + continue; + } + } else { + LOG_TOPIC(DEBUG, Logger::DATAFILES) + << "ignoring file '" << file + << "' because it does not look like a datafile"; continue; } + } - // ............................................................................. - // file is an index, just store the filename - // ............................................................................. + // ............................................................................. + // file is an index, just store the filename + // ............................................................................. - if (TRI_EqualString("index", first, firstLen) && - TRI_EqualString("json", third, thirdLen)) { - char* filename; + if (filetype == "index" && extension == "json") { + TRI_PushBackVectorString( + &collection->_indexFiles, + TRI_DuplicateString(filename.c_str(), filename.size())); + continue; + } - filename = TRI_Concatenate2File(collection->_directory, file.c_str()); - TRI_PushBackVectorString(&collection->_indexFiles, filename); - } + // ............................................................................. + // file is a journal or datafile, open the datafile + // ............................................................................. - // ............................................................................. - // file is a journal or datafile, open the datafile - // ............................................................................. + if (extension == "db") { + TRI_col_header_marker_t* cm; - else if (TRI_EqualString("db", third, thirdLen)) { - char* filename; - char* ptr; - TRI_col_header_marker_t* cm; + // found a compaction file. now rename it back + if (filetype == "compaction") { + std::string relName = "datafile-" + qualifier + "." 
+ extension; + std::string newName = + FileUtils::buildFilename(collection->_directory, relName); - if (TRI_EqualString("compaction", first, firstLen)) { - // found a compaction file. now rename it back - char* relName; - char* newName; + if (FileUtils::exists(newName)) { + // we have a compaction-xxxx and a datafile-xxxx file. we'll keep + // the datafile + FileUtils::remove(filename); - filename = TRI_Concatenate2File(collection->_directory, file.c_str()); - relName = TRI_Concatenate2String( - "datafile-", file.c_str() + strlen("compaction-")); - newName = TRI_Concatenate2File(collection->_directory, relName); - - TRI_FreeString(TRI_CORE_MEM_ZONE, relName); - - if (TRI_ExistsFile(newName)) { - // we have a compaction-xxxx and a datafile-xxxx file. we'll keep - // the datafile - LOG(WARN) << "removing unfinished compaction file '" << filename << "'"; - TRI_UnlinkFile(filename); - - TRI_FreeString(TRI_CORE_MEM_ZONE, newName); - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - continue; - } else { - int res; - - res = TRI_RenameFile(filename, newName); - - if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "unable to rename compaction file '" << filename << "' to '" << newName << "'"; - TRI_FreeString(TRI_CORE_MEM_ZONE, newName); - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - stop = true; - break; - } - } - - TRI_Free(TRI_CORE_MEM_ZONE, filename); - // reuse newName - filename = newName; + LOG_TOPIC(WARN, Logger::DATAFILES) + << "removing unfinished compaction file '" << filename << "'"; + continue; } else { - filename = TRI_Concatenate2File(collection->_directory, file.c_str()); - } + int res; - TRI_ASSERT(filename != nullptr); - datafile = TRI_OpenDatafile(filename, ignoreErrors); + // this should fail, but shouldn't do any harm either... 
+ FileUtils::remove(newName); - if (datafile == nullptr) { - collection->_lastError = TRI_errno(); - LOG(ERR) << "cannot open datafile '" << filename << "': " << TRI_last_error(); + res = TRI_RenameFile(filename.c_str(), newName.c_str()); - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - stop = true; - break; - } - - TRI_PushBackVectorPointer(&all, datafile); - - // check the document header - ptr = datafile->_data; - // skip the datafile header - ptr += TRI_DF_ALIGN_BLOCK(sizeof(TRI_df_header_marker_t)); - cm = (TRI_col_header_marker_t*)ptr; - - if (cm->base._type != TRI_COL_MARKER_HEADER) { - LOG(ERR) << "collection header mismatch in file '" << filename << "', expected TRI_COL_MARKER_HEADER, found " << cm->base._type; - - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - stop = true; - break; - } - - if (cm->_cid != collection->_info.id()) { - LOG(ERR) << "collection identifier mismatch, expected " << collection->_info.id() << ", found " << cm->_cid; - - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - stop = true; - break; - } - - // file is a journal - if (TRI_EqualString("journal", first, firstLen)) { - if (datafile->_isSealed) { - if (datafile->_state != TRI_DF_STATE_READ) { - LOG(WARN) << "strange, journal '" << filename << "' is already sealed; must be a left over; will use it as datafile"; - } - - TRI_PushBackVectorPointer(&sealed, datafile); - } else { - TRI_PushBackVectorPointer(&journals, datafile); - } - } - - // file is a compactor - else if (TRI_EqualString("compactor", first, firstLen)) { - // ignore - } - - // file is a datafile (or was a compaction file) - else if (TRI_EqualString("datafile", first, firstLen) || - TRI_EqualString("compaction", first, firstLen)) { - if (!datafile->_isSealed) { - LOG(ERR) << "datafile '" << filename << "' is not sealed, this should never happen"; - - collection->_lastError = - TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE); - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); + if (res != TRI_ERROR_NO_ERROR) { + 
LOG_TOPIC(ERR, Logger::DATAFILES) + << "unable to rename compaction file '" << filename << "' to '" + << newName << "'"; stop = true; break; - } else { - TRI_PushBackVectorPointer(&datafiles, datafile); } } - else { - LOG(ERR) << "unknown datafile '" << file.c_str() << "'"; - } - - TRI_FreeString(TRI_CORE_MEM_ZONE, filename); - } else { - LOG(ERR) << "unknown datafile '" << file.c_str() << "'"; + // reuse newName + filename = newName; } + + datafile = TRI_OpenDatafile(filename.c_str(), ignoreErrors); + + if (datafile == nullptr) { + collection->_lastError = TRI_errno(); + LOG_TOPIC(ERR, Logger::DATAFILES) << "cannot open datafile '" + << filename + << "': " << TRI_last_error(); + + stop = true; + break; + } + + TRI_PushBackVectorPointer(&all, datafile); + + // check the document header + char* ptr = datafile->_data; + + // skip the datafile header + ptr += TRI_DF_ALIGN_BLOCK(sizeof(TRI_df_header_marker_t)); + cm = (TRI_col_header_marker_t*)ptr; + + if (cm->base._type != TRI_COL_MARKER_HEADER) { + LOG(ERR) << "collection header mismatch in file '" << filename + << "', expected TRI_COL_MARKER_HEADER, found " + << cm->base._type; + + stop = true; + break; + } + + if (cm->_cid != collection->_info.id()) { + LOG(ERR) << "collection identifier mismatch, expected " + << collection->_info.id() << ", found " << cm->_cid; + + stop = true; + break; + } + + // file is a journal + if (filetype == "journal") { + if (datafile->_isSealed) { + if (datafile->_state != TRI_DF_STATE_READ) { + LOG_TOPIC(WARN, Logger::DATAFILES) + << "strange, journal '" << filename + << "' is already sealed; must be a left over; will use " + "it as datafile"; + } + + TRI_PushBackVectorPointer(&sealed, datafile); + } else { + TRI_PushBackVectorPointer(&journals, datafile); + } + } + + // file is a compactor + else if (filetype == "compactor") { + // ignore + } + + // file is a datafile (or was a compaction file) + else if (filetype == "datafile" || filetype == "compaction") { + if (!datafile->_isSealed) 
{ + LOG_TOPIC(ERR, Logger::DATAFILES) + << "datafile '" << filename + << "' is not sealed, this should never happen"; + + collection->_lastError = + TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE); + + stop = true; + break; + } else { + TRI_PushBackVectorPointer(&datafiles, datafile); + } + } + + else { + LOG_TOPIC(ERR, Logger::DATAFILES) << "unknown datafile '" << file + << "'"; + } + } else { + LOG_TOPIC(ERR, Logger::DATAFILES) << "unknown datafile '" << file << "'"; } } - regfree(&re); - size_t i, n; + // convert the sealed journals into datafiles if (!stop) { n = sealed._length; @@ -544,7 +546,8 @@ static bool CheckCollection(TRI_collection_t* collection, bool ignoreErrors) { } else { collection->_lastError = datafile->_lastError; stop = true; - LOG(ERR) << "cannot rename sealed log-file to " << filename << ", this should not happen: " << TRI_last_error(); + LOG(ERR) << "cannot rename sealed log-file to " << filename + << ", this should not happen: " << TRI_last_error(); break; } @@ -625,7 +628,8 @@ static bool IterateDatafilesVector(const TRI_vector_pointer_t* const files, TRI_datafile_t* datafile = static_cast(TRI_AtVectorPointer(files, i)); - LOG(TRACE) << "iterating over datafile '" << datafile->getName(datafile) << "', fid " << datafile->_fid; + LOG(TRACE) << "iterating over datafile '" << datafile->getName(datafile) + << "', fid " << datafile->_fid; if (!TRI_IterateDatafile(datafile, iterator, data)) { return false; @@ -693,7 +697,7 @@ static bool IterateFiles(TRI_vector_string_t* vector, /// @brief get the full directory name for a collection //////////////////////////////////////////////////////////////////////////////// -static std::string GetCollectionDirectory(char const* path, char const* name, +static std::string GetCollectionDirectory(char const* path, char const* name, TRI_voc_cid_t cid) { TRI_ASSERT(path != nullptr); TRI_ASSERT(name != nullptr); @@ -718,7 +722,9 @@ TRI_collection_t* TRI_CreateCollection( parameters.maximalSize()) { 
TRI_set_errno(TRI_ERROR_ARANGO_DATAFILE_FULL); - LOG(ERR) << "cannot create datafile '" << parameters.namec_str() << "' in '" << path << "', maximal size '" << (unsigned int)parameters.maximalSize() << "' is too small"; + LOG(ERR) << "cannot create datafile '" << parameters.namec_str() << "' in '" + << path << "', maximal size '" + << (unsigned int)parameters.maximalSize() << "' is too small"; return nullptr; } @@ -726,19 +732,22 @@ TRI_collection_t* TRI_CreateCollection( if (!TRI_IsDirectory(path)) { TRI_set_errno(TRI_ERROR_ARANGO_DATADIR_INVALID); - LOG(ERR) << "cannot create collection '" << path << "', path is not a directory"; + LOG(ERR) << "cannot create collection '" << path + << "', path is not a directory"; return nullptr; } - std::string const dirname = GetCollectionDirectory( - path, parameters.namec_str(), parameters.id()); + std::string const dirname = + GetCollectionDirectory(path, parameters.namec_str(), parameters.id()); // directory must not exist if (TRI_ExistsFile(dirname.c_str())) { TRI_set_errno(TRI_ERROR_ARANGO_COLLECTION_DIRECTORY_ALREADY_EXISTS); - LOG(ERR) << "cannot create collection '" << parameters.namec_str() << "' in directory '" << dirname.c_str() << "': directory already exists"; + LOG(ERR) << "cannot create collection '" << parameters.namec_str() + << "' in directory '" << dirname.c_str() + << "': directory already exists"; return nullptr; } @@ -754,38 +763,39 @@ TRI_collection_t* TRI_CreateCollection( int res = TRI_CreateDirectory(tmpname.c_str(), systemError, errorMessage); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot create collection '" << parameters.namec_str() << "' in directory '" << path << "': " << TRI_errno_string(res) << " - " << systemError << " - " << errorMessage.c_str(); + LOG(ERR) << "cannot create collection '" << parameters.namec_str() + << "' in directory '" << path << "': " << TRI_errno_string(res) + << " - " << systemError << " - " << errorMessage.c_str(); return nullptr; } - 
TRI_IF_FAILURE("CreateCollection::tempDirectory") { - return nullptr; - } + TRI_IF_FAILURE("CreateCollection::tempDirectory") { return nullptr; } // create a temporary file - std::string const tmpfile(arangodb::basics::FileUtils::buildFilename(tmpname.c_str(), ".tmp")); + std::string const tmpfile( + arangodb::basics::FileUtils::buildFilename(tmpname.c_str(), ".tmp")); res = TRI_WriteFile(tmpfile.c_str(), "", 0); - TRI_IF_FAILURE("CreateCollection::tempFile") { - return nullptr; - } + TRI_IF_FAILURE("CreateCollection::tempFile") { return nullptr; } if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot create collection '" << parameters.namec_str() << "' in directory '" << path << "': " << TRI_errno_string(res) << " - " << systemError << " - " << errorMessage.c_str(); + LOG(ERR) << "cannot create collection '" << parameters.namec_str() + << "' in directory '" << path << "': " << TRI_errno_string(res) + << " - " << systemError << " - " << errorMessage.c_str(); TRI_RemoveDirectory(tmpname.c_str()); return nullptr; } - TRI_IF_FAILURE("CreateCollection::renameDirectory") { - return nullptr; - } + TRI_IF_FAILURE("CreateCollection::renameDirectory") { return nullptr; } res = TRI_RenameFile(tmpname.c_str(), dirname.c_str()); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot create collection '" << parameters.namec_str() << "' in directory '" << path << "': " << TRI_errno_string(res) << " - " << systemError << " - " << errorMessage.c_str(); + LOG(ERR) << "cannot create collection '" << parameters.namec_str() + << "' in directory '" << path << "': " << TRI_errno_string(res) + << " - " << systemError << " - " << errorMessage.c_str(); TRI_RemoveDirectory(tmpname.c_str()); return nullptr; @@ -1080,7 +1090,8 @@ VocbaseCollectionInfo VocbaseCollectionInfo::fromFile( char* filename = TRI_Concatenate2File(path, TRI_VOC_PARAMETER_FILE); if (filename == nullptr) { - LOG(ERR) << "cannot load parameter info for collection '" << path << "', out of memory"; + LOG(ERR) << "cannot 
load parameter info for collection '" << path + << "', out of memory"; THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); } @@ -1094,7 +1105,8 @@ VocbaseCollectionInfo VocbaseCollectionInfo::fromFile( arangodb::basics::VelocyPackHelper::velocyPackFromFile(filePath); VPackSlice slice = content->slice(); if (!slice.isObject()) { - LOG(ERR) << "cannot open '" << filename << "', collection parameters are not readable"; + LOG(ERR) << "cannot open '" << filename + << "', collection parameters are not readable"; TRI_FreeString(TRI_CORE_MEM_ZONE, filename); THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE); } @@ -1124,7 +1136,8 @@ VocbaseCollectionInfo VocbaseCollectionInfo::fromFile( if (info.name()[0] != '\0') { // only warn if the collection version is older than expected, and if it's // not a shape collection - LOG(WARN) << "collection '" << info.namec_str() << "' has an old version and needs to be upgraded."; + LOG(WARN) << "collection '" << info.namec_str() + << "' has an old version and needs to be upgraded."; } } return info; @@ -1215,7 +1228,8 @@ int VocbaseCollectionInfo::saveToFile(char const* path, bool forceSync) const { TRI_json_t* json = TRI_CreateJsonCollectionInfo(*this); if (json == nullptr) { - LOG(ERR) << "cannot save collection properties file '" << filename << "': " << TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY); + LOG(ERR) << "cannot save collection properties file '" << filename + << "': " << TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY); TRI_FreeString(TRI_CORE_MEM_ZONE, filename); return TRI_ERROR_OUT_OF_MEMORY; } @@ -1226,7 +1240,8 @@ int VocbaseCollectionInfo::saveToFile(char const* path, bool forceSync) const { int res; if (!ok) { res = TRI_errno(); - LOG(ERR) << "cannot save collection properties file '" << filename << "': " << TRI_last_error(); + LOG(ERR) << "cannot save collection properties file '" << filename + << "': " << TRI_last_error(); } else { res = TRI_ERROR_NO_ERROR; } @@ -1527,7 +1542,8 @@ void 
TRI_IterateIndexCollection(TRI_collection_t* collection, bool ok = iterator(filename, data); if (!ok) { - LOG(ERR) << "cannot load index '" << filename << "' for collection '" << collection->_info.namec_str() << "'"; + LOG(ERR) << "cannot load index '" << filename << "' for collection '" + << collection->_info.namec_str() << "'"; } } } @@ -1559,14 +1575,16 @@ TRI_collection_t* TRI_OpenCollection(TRI_vocbase_t* vocbase, double start = TRI_microtime(); - LOG_TOPIC(TRACE, Logger::PERFORMANCE) << - "open-collection { collection: " << vocbase->_name << "/" << collection->_info.name(); + LOG_TOPIC(TRACE, Logger::PERFORMANCE) + << "open-collection { collection: " << vocbase->_name << "/" + << collection->_info.name(); // check for journals and datafiles bool ok = CheckCollection(collection, ignoreErrors); if (!ok) { - LOG(DEBUG) << "cannot open '" << collection->_directory << "', check failed"; + LOG(DEBUG) << "cannot open '" << collection->_directory + << "', check failed"; if (collection->_directory != nullptr) { TRI_FreeString(TRI_CORE_MEM_ZONE, collection->_directory); @@ -1576,11 +1594,15 @@ TRI_collection_t* TRI_OpenCollection(TRI_vocbase_t* vocbase, return nullptr; } - LOG_TOPIC(TRACE, Logger::PERFORMANCE) << "[timer] " << Logger::DURATION(TRI_microtime() - start) << " s, open-collection { collection: " << vocbase->_name << "/" << collection->_info.name() << " }"; + LOG_TOPIC(TRACE, Logger::PERFORMANCE) + << "[timer] " << Logger::DURATION(TRI_microtime() - start) + << " s, open-collection { collection: " << vocbase->_name << "/" + << collection->_info.name() << " }"; return collection; } catch (...) 
{ - LOG(ERR) << "cannot load collection parameter file '" << path << "': " << TRI_last_error(); + LOG(ERR) << "cannot load collection parameter file '" << path + << "': " << TRI_last_error(); return nullptr; } } diff --git a/arangod/VocBase/server.cpp b/arangod/VocBase/server.cpp index fcbae06f87..020ad95b74 100644 --- a/arangod/VocBase/server.cpp +++ b/arangod/VocBase/server.cpp @@ -27,21 +27,20 @@ #include "Basics/win-utils.h" #endif -#include - #include "Aql/QueryCache.h" #include "Aql/QueryRegistry.h" -#include "Basics/conversions.h" #include "Basics/Exceptions.h" -#include "Basics/files.h" #include "Basics/FileUtils.h" +#include "Basics/JsonHelper.h" +#include "Basics/Logger.h" +#include "Basics/MutexLocker.h" +#include "Basics/StringUtils.h" +#include "Basics/conversions.h" +#include "Basics/files.h" #include "Basics/hashes.h" #include "Basics/json.h" -#include "Basics/JsonHelper.h" #include "Basics/locks.h" -#include "Basics/Logger.h" #include "Basics/memory-map.h" -#include "Basics/MutexLocker.h" #include "Basics/random.h" #include "Basics/tri-strings.h" #include "Cluster/ServerState.h" @@ -52,6 +51,8 @@ #include "Wal/LogfileManager.h" #include "Wal/Marker.h" +using namespace arangodb::basics; + //////////////////////////////////////////////////////////////////////////////// /// @brief page size //////////////////////////////////////////////////////////////////////////////// @@ -193,7 +194,8 @@ static int WriteServerId(char const* filename) { builder.close(); } catch (...) 
{ // out of memory - LOG(ERR) << "cannot save server id in file '" << filename << "': out of memory"; + LOG(ERR) << "cannot save server id in file '" << filename + << "': out of memory"; return TRI_ERROR_OUT_OF_MEMORY; } @@ -203,7 +205,8 @@ static int WriteServerId(char const* filename) { filename, builder.slice(), true); if (!ok) { - LOG(ERR) << "could not save server id in file '" << filename << "': " << TRI_last_error(); + LOG(ERR) << "could not save server id in file '" << filename + << "': " << TRI_last_error(); return TRI_ERROR_INTERNAL; } @@ -307,9 +310,11 @@ static int CreateBaseApplicationDirectory(char const* basePath, LOG(INFO) << "created base application directory '" << path << "'"; } else { if ((res != TRI_ERROR_FILE_EXISTS) || (!TRI_IsDirectory(path))) { - LOG(ERR) << "unable to create base application directory " << errorMessage.c_str(); + LOG(ERR) << "unable to create base application directory " + << errorMessage.c_str(); } else { - LOG(INFO) << "otherone created base application directory '" << path << "'"; + LOG(INFO) << "otherone created base application directory '" << path + << "'"; res = TRI_ERROR_NO_ERROR; } } @@ -341,15 +346,20 @@ static int CreateApplicationDirectory(char const* name, char const* basePath) { if (res == TRI_ERROR_NO_ERROR) { if (arangodb::wal::LogfileManager::instance()->isInRecovery()) { - LOG(TRACE) << "created application directory '" << path << "' for database '" << name << "'"; + LOG(TRACE) << "created application directory '" << path + << "' for database '" << name << "'"; } else { - LOG(INFO) << "created application directory '" << path << "' for database '" << name << "'"; + LOG(INFO) << "created application directory '" << path + << "' for database '" << name << "'"; } } else if (res == TRI_ERROR_FILE_EXISTS) { - LOG(INFO) << "unable to create application directory '" << path << "' for database '" << name << "': " << errorMessage.c_str(); + LOG(INFO) << "unable to create application directory '" << path + << "' 
for database '" << name + << "': " << errorMessage.c_str(); res = TRI_ERROR_NO_ERROR; } else { - LOG(ERR) << "unable to create application directory '" << path << "' for database '" << name << "': " << errorMessage.c_str(); + LOG(ERR) << "unable to create application directory '" << path + << "' for database '" << name << "': " << errorMessage.c_str(); } } @@ -363,9 +373,7 @@ static int CreateApplicationDirectory(char const* name, char const* basePath) { /// @brief iterate over all databases in the databases directory and open them //////////////////////////////////////////////////////////////////////////////// -static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { - regmatch_t matches[2]; - +static int OpenDatabases(TRI_server_t* server, bool isUpgrade) { if (server->_iterateMarkersOnOpen && !server->_hasCreatedSystemDatabase) { LOG(WARN) << "no shutdown info found. scanning datafiles for last tick..."; } @@ -393,15 +401,15 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { // construct and validate path // ......................................................................... - std::string const databaseDirectory(arangodb::basics::FileUtils::buildFilename(server->_databasePath, name.c_str())); + std::string const databaseDirectory( + arangodb::basics::FileUtils::buildFilename(server->_databasePath, + name.c_str())); if (!TRI_IsDirectory(databaseDirectory.c_str())) { continue; } - if (regexec(regex, name.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) != 0) { - // name does not match the pattern, ignore this directory + if (! 
StringUtils::isPrefix(name, "database-")) { continue; } @@ -411,14 +419,16 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { // the database directory we found is not writable for the current user // this can cause serious trouble so we will abort the server start if we // encounter this situation - LOG(ERR) << "database directory '" << databaseDirectory.c_str() << "' is not writable for current user"; + LOG(ERR) << "database directory '" << databaseDirectory.c_str() + << "' is not writable for current user"; res = TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE; break; } // we have a writable directory... - std::string const tmpfile(arangodb::basics::FileUtils::buildFilename(databaseDirectory.c_str(), ".tmp")); + std::string const tmpfile(arangodb::basics::FileUtils::buildFilename( + databaseDirectory.c_str(), ".tmp")); if (TRI_ExistsFile(tmpfile.c_str())) { // still a temporary... must ignore @@ -433,24 +443,29 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { // ......................................................................... 
// now read data from parameter.json file - std::string const parametersFile(arangodb::basics::FileUtils::buildFilename(databaseDirectory, TRI_VOC_PARAMETER_FILE)); + std::string const parametersFile(arangodb::basics::FileUtils::buildFilename( + databaseDirectory, TRI_VOC_PARAMETER_FILE)); if (!TRI_ExistsFile(parametersFile.c_str())) { // no parameter.json file - LOG(ERR) << "database directory '" << databaseDirectory.c_str() << "' does not contain parameters file or parameters file cannot be read"; + LOG(ERR) << "database directory '" << databaseDirectory.c_str() + << "' does not contain parameters file or parameters file " + "cannot be read"; // abort res = TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE; break; } - LOG(DEBUG) << "reading database parameters from file '" << parametersFile.c_str() << "'"; + LOG(DEBUG) << "reading database parameters from file '" + << parametersFile.c_str() << "'"; std::shared_ptr builder; try { - builder = - arangodb::basics::VelocyPackHelper::velocyPackFromFile(parametersFile); + builder = arangodb::basics::VelocyPackHelper::velocyPackFromFile( + parametersFile); } catch (...) { - LOG(ERR) << "database directory '" << databaseDirectory.c_str() << "' does not contain a valid parameters file"; + LOG(ERR) << "database directory '" << databaseDirectory.c_str() + << "' does not contain a valid parameters file"; // abort res = TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE; @@ -464,9 +479,11 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { if (arangodb::basics::VelocyPackHelper::getBooleanValue(parameters, "deleted", false)) { // database is deleted, skip it! 
- LOG(INFO) << "found dropped database in directory '" << databaseDirectory.c_str() << "'"; + LOG(INFO) << "found dropped database in directory '" + << databaseDirectory.c_str() << "'"; - LOG(INFO) << "removing superfluous database directory '" << databaseDirectory.c_str() << "'"; + LOG(INFO) << "removing superfluous database directory '" + << databaseDirectory.c_str() << "'"; TRI_RemoveDirectory(databaseDirectory.c_str()); continue; @@ -474,7 +491,8 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { VPackSlice idSlice = parameters.get("id"); if (!idSlice.isString()) { - LOG(ERR) << "database directory '" << databaseDirectory.c_str() << "' does not contain a valid parameters file"; + LOG(ERR) << "database directory '" << databaseDirectory.c_str() + << "' does not contain a valid parameters file"; res = TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE; break; } @@ -485,7 +503,8 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { VPackSlice nameSlice = parameters.get("name"); if (!nameSlice.isString()) { - LOG(ERR) << "database directory '" << databaseDirectory.c_str() << "' does not contain a valid parameters file"; + LOG(ERR) << "database directory '" << databaseDirectory.c_str() + << "' does not contain a valid parameters file"; res = TRI_ERROR_ARANGO_ILLEGAL_PARAMETER_FILE; break; @@ -512,9 +531,9 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { // ......................................................................... 
// try to open this database - TRI_vocbase_t* vocbase = - TRI_OpenVocBase(server, databaseDirectory.c_str(), id, databaseName.c_str(), &defaults, - isUpgrade, server->_iterateMarkersOnOpen); + TRI_vocbase_t* vocbase = TRI_OpenVocBase( + server, databaseDirectory.c_str(), id, databaseName.c_str(), &defaults, + isUpgrade, server->_iterateMarkersOnOpen); if (vocbase == nullptr) { // grab last error @@ -525,7 +544,9 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { res = TRI_ERROR_INTERNAL; } - LOG(ERR) << "could not process database directory '" << databaseDirectory.c_str() << "' for database '" << name.c_str() << "': " << TRI_errno_string(res); + LOG(ERR) << "could not process database directory '" + << databaseDirectory.c_str() << "' for database '" + << name.c_str() << "': " << TRI_errno_string(res); break; } @@ -540,13 +561,15 @@ static int OpenDatabases(TRI_server_t* server, regex_t* regex, bool isUpgrade) { } } catch (...) { res = TRI_ERROR_OUT_OF_MEMORY; - LOG(ERR) << "could not add database '" << name.c_str() << "': out of memory"; + LOG(ERR) << "could not add database '" << name.c_str() + << "': out of memory"; break; } TRI_ASSERT(found == nullptr); - LOG(INFO) << "loaded database '" << vocbase->_name << "' from '" << vocbase->_path << "'"; + LOG(INFO) << "loaded database '" << vocbase->_name << "' from '" + << vocbase->_path << "'"; } server->_databasesLists = newLists; @@ -648,7 +671,8 @@ static int CloseDroppedDatabases(TRI_server_t* server) { } else if (vocbase->_type == TRI_VOCBASE_TYPE_COORDINATOR) { delete vocbase; } else { - LOG(ERR) << "unknown database type " << vocbase->_type << " " << vocbase->_name << " - close doing nothing."; + LOG(ERR) << "unknown database type " << vocbase->_type << " " + << vocbase->_name << " - close doing nothing."; } } @@ -661,43 +685,31 @@ static int CloseDroppedDatabases(TRI_server_t* server) { /// @brief get the names of all databases in the ArangoDB 1.4 layout 
//////////////////////////////////////////////////////////////////////////////// -static int GetDatabases(TRI_server_t* server, std::vector<std::string>& databases) { - regmatch_t matches[2]; - +static int GetDatabases(TRI_server_t* server, + std::vector<std::string>& databases) { TRI_ASSERT(server != nullptr); - regex_t re; - int res = regcomp(&re, "^database-([0-9][0-9]*)$", REG_EXTENDED); - - if (res != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return TRI_ERROR_INTERNAL; - } - std::vector<std::string> files = TRI_FilesDirectory(server->_databasePath); - res = TRI_ERROR_NO_ERROR; + int res = TRI_ERROR_NO_ERROR; for (auto const& name : files) { TRI_ASSERT(!name.empty()); - if (regexec(&re, name.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) != 0) { + if (! StringUtils::isPrefix(name, "database-")) { // found some other file continue; } // found a database name - std::string const dname(arangodb::basics::FileUtils::buildFilename(server->_databasePath, name.c_str())); + std::string const dname(arangodb::basics::FileUtils::buildFilename( + server->_databasePath, name.c_str())); if (TRI_IsDirectory(dname.c_str())) { databases.push_back(name); } } - regfree(&re); - // sort by id std::sort(databases.begin(), databases.end(), DatabaseIdStringComparator); @@ -709,33 +721,21 @@ static int GetDatabases(TRI_server_t* server, std::vector<std::string>& database //////////////////////////////////////////////////////////////////////////////// static bool HasOldCollections(TRI_server_t* server) { - regex_t re; - regmatch_t matches[3]; - TRI_ASSERT(server != nullptr); - if (regcomp(&re, "^collection-([0-9][0-9]*)(-[0-9]+)?$", REG_EXTENDED) != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return false; - } - bool found = false; std::vector<std::string> files = TRI_FilesDirectory(server->_basePath); for (auto const& name : files) { TRI_ASSERT(!name.empty()); - if (regexec(&re, name.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) == 0) { + if (StringUtils::isPrefix(name, 
"collection-")) { // found "collection-xxxx". we can ignore the rest found = true; break; } } - regfree(&re); - return found; } @@ -806,7 +806,8 @@ static int CreateDatabaseDirectory(TRI_server_t* server, TRI_voc_tick_t tick, TRI_ASSERT(databaseName != nullptr); std::string const dname("database-" + std::to_string(tick)); - std::string const dirname(arangodb::basics::FileUtils::buildFilename(server->_databasePath, dname.c_str())); + std::string const dirname(arangodb::basics::FileUtils::buildFilename( + server->_databasePath, dname.c_str())); // use a temporary directory first. otherwise, if creation fails, the server // might be left with an empty database directory at restart, and abort. @@ -824,21 +825,19 @@ static int CreateDatabaseDirectory(TRI_server_t* server, TRI_voc_tick_t tick, if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_FILE_EXISTS) { - LOG(ERR) << "failed to create database directory: " << errorMessage.c_str(); + LOG(ERR) << "failed to create database directory: " + << errorMessage.c_str(); } return res; } - TRI_IF_FAILURE("CreateDatabase::tempDirectory") { - return TRI_ERROR_DEBUG; - } + TRI_IF_FAILURE("CreateDatabase::tempDirectory") { return TRI_ERROR_DEBUG; } - std::string const tmpfile(arangodb::basics::FileUtils::buildFilename(tmpname, ".tmp")); + std::string const tmpfile( + arangodb::basics::FileUtils::buildFilename(tmpname, ".tmp")); res = TRI_WriteFile(tmpfile.c_str(), "", 0); - TRI_IF_FAILURE("CreateDatabase::tempFile") { - return TRI_ERROR_DEBUG; - } + TRI_IF_FAILURE("CreateDatabase::tempFile") { return TRI_ERROR_DEBUG; } if (res != TRI_ERROR_NO_ERROR) { TRI_RemoveDirectory(tmpname.c_str()); @@ -848,9 +847,7 @@ static int CreateDatabaseDirectory(TRI_server_t* server, TRI_voc_tick_t tick, // finally rename res = TRI_RenameFile(tmpname.c_str(), dirname.c_str()); - TRI_IF_FAILURE("CreateDatabase::renameDirectory") { - return TRI_ERROR_DEBUG; - } + TRI_IF_FAILURE("CreateDatabase::renameDirectory") { return TRI_ERROR_DEBUG; } if (res != 
TRI_ERROR_NO_ERROR) { TRI_RemoveDirectory(tmpname.c_str()); // clean up @@ -859,7 +856,8 @@ static int CreateDatabaseDirectory(TRI_server_t* server, TRI_voc_tick_t tick, // now everything is valid - res = SaveDatabaseParameters(tick, databaseName, false, defaults, dirname.c_str()); + res = SaveDatabaseParameters(tick, databaseName, false, defaults, + dirname.c_str()); if (res != TRI_ERROR_NO_ERROR) { return res; @@ -867,7 +865,8 @@ static int CreateDatabaseDirectory(TRI_server_t* server, TRI_voc_tick_t tick, // finally remove the .tmp file { - std::string const tmpfile(arangodb::basics::FileUtils::buildFilename(dirname, ".tmp")); + std::string const tmpfile( + arangodb::basics::FileUtils::buildFilename(dirname, ".tmp")); TRI_UnlinkFile(tmpfile.c_str()); } @@ -891,7 +890,8 @@ static int InitDatabases(TRI_server_t* server, bool checkVersion, if (res == TRI_ERROR_NO_ERROR) { if (names.empty()) { if (!performUpgrade && HasOldCollections(server)) { - LOG(ERR) << "no databases found. Please start the server with the --upgrade option"; + LOG(ERR) << "no databases found. 
Please start the server with the " + "--upgrade option"; return TRI_ERROR_ARANGO_DATADIR_INVALID; } @@ -934,7 +934,8 @@ static int WriteCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice) { } if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "could not save create database marker in log: " << TRI_errno_string(res); + LOG(WARN) << "could not save create database marker in log: " + << TRI_errno_string(res); } return res; @@ -964,7 +965,8 @@ static int WriteDropMarker(TRI_voc_tick_t id) { } if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "could not save drop database marker in log: " << TRI_errno_string(res); + LOG(WARN) << "could not save drop database marker in log: " + << TRI_errno_string(res); } return res; @@ -1042,7 +1044,9 @@ static void DatabaseManager(void* data) { // remember the database path char* path; - LOG(TRACE) << "physically removing database directory '" << database->_path << "' of database '" << database->_name << "'"; + LOG(TRACE) << "physically removing database directory '" + << database->_path << "' of database '" << database->_name + << "'"; // remove apps directory for database if (database->_isOwnAppsDirectory && strlen(server->_appPath) > 0) { @@ -1050,7 +1054,8 @@ static void DatabaseManager(void* data) { if (path != nullptr) { if (TRI_IsDirectory(path)) { - LOG(TRACE) << "removing app directory '" << path << "' of database '" << database->_name << "'"; + LOG(TRACE) << "removing app directory '" << path + << "' of database '" << database->_name << "'"; TRI_RemoveDirectory(path); } @@ -1231,14 +1236,16 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, int res; if (!TRI_IsDirectory(server->_basePath)) { - LOG(ERR) << "database path '" << server->_basePath << "' is not a directory"; + LOG(ERR) << "database path '" << server->_basePath + << "' is not a directory"; return TRI_ERROR_ARANGO_DATADIR_INVALID; } if (!TRI_IsWritable(server->_basePath)) { // database directory is not writable for the current user... 
bad luck - LOG(ERR) << "database directory '" << server->_basePath << "' is not writable for current user"; + LOG(ERR) << "database directory '" << server->_basePath + << "' is not writable for current user"; return TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE; } @@ -1250,7 +1257,8 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, res = TRI_VerifyLockFile(server->_lockFilename); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "database is locked, please check the lock file '" << server->_lockFilename << "'"; + LOG(ERR) << "database is locked, please check the lock file '" + << server->_lockFilename << "'"; return TRI_ERROR_ARANGO_DATADIR_LOCKED; } @@ -1262,7 +1270,9 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, res = TRI_CreateLockFile(server->_lockFilename); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot lock the database directory, please check the lock file '" << server->_lockFilename << "': " << TRI_errno_string(res); + LOG(ERR) + << "cannot lock the database directory, please check the lock file '" + << server->_lockFilename << "': " << TRI_errno_string(res); return TRI_ERROR_ARANGO_DATADIR_UNLOCKABLE; } @@ -1278,7 +1288,8 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, } if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "reading/creating server file failed: " << TRI_errno_string(res); + LOG(ERR) << "reading/creating server file failed: " + << TRI_errno_string(res); return res; } @@ -1293,7 +1304,8 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, res = TRI_CreateDirectory(server->_databasePath, systemError, errorMessage); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "unable to create database directory '" << server->_databasePath << "': " << errorMessage.c_str(); + LOG(ERR) << "unable to create database directory '" + << server->_databasePath << "': " << errorMessage.c_str(); return TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE; } @@ -1302,7 +1314,8 @@ int TRI_StartServer(TRI_server_t* server, bool 
checkVersion, } if (!TRI_IsWritable(server->_databasePath)) { - LOG(ERR) << "database directory '" << server->_databasePath << "' is not writable"; + LOG(ERR) << "database directory '" << server->_databasePath + << "' is not writable"; return TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE; } @@ -1334,9 +1347,11 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, errorMessage); if (res) { - LOG(INFO) << "created --javascript.app-path directory '" << server->_appPath << "'."; + LOG(INFO) << "created --javascript.app-path directory '" + << server->_appPath << "'."; } else { - LOG(ERR) << "unable to create --javascript.app-path directory '" << server->_appPath << "': " << errorMessage.c_str(); + LOG(ERR) << "unable to create --javascript.app-path directory '" + << server->_appPath << "': " << errorMessage.c_str(); return TRI_ERROR_SYS_ERROR; } } @@ -1360,22 +1375,12 @@ int TRI_StartServer(TRI_server_t* server, bool checkVersion, // open and scan all databases // ........................................................................... 
- regex_t regex; - res = regcomp(&regex, "^database-([0-9][0-9]*)$", REG_EXTENDED); - - if (res != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return TRI_ERROR_OUT_OF_MEMORY; - } - // scan all databases - res = OpenDatabases(server, &regex, performUpgrade); - - regfree(&regex); + res = OpenDatabases(server, performUpgrade); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not iterate over all databases: " << TRI_errno_string(res); + LOG(ERR) << "could not iterate over all databases: " + << TRI_errno_string(res); return res; } @@ -1413,12 +1418,14 @@ int TRI_InitDatabasesServer(TRI_server_t* server) { if (vocbase->_replicationApplier->_configuration._autoStart) { if (server->_disableReplicationAppliers) { - LOG(INFO) << "replication applier explicitly deactivated for database '" << vocbase->_name << "'"; + LOG(INFO) << "replication applier explicitly deactivated for database '" + << vocbase->_name << "'"; } else { int res = vocbase->_replicationApplier->start(0, false, 0); if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "unable to start replication applier for database '" << vocbase->_name << "': " << TRI_errno_string(res); + LOG(WARN) << "unable to start replication applier for database '" + << vocbase->_name << "': " << TRI_errno_string(res); } } } @@ -1503,7 +1510,8 @@ int TRI_CreateCoordinatorDatabaseServer(TRI_server_t* server, res = TRI_ERROR_INTERNAL; } - LOG(ERR) << "could not create database '" << name << "': " << TRI_errno_string(res); + LOG(ERR) << "could not create database '" << name + << "': " << TRI_errno_string(res); return res; } @@ -1591,7 +1599,7 @@ int TRI_CreateDatabaseServer(TRI_server_t* server, TRI_voc_tick_t databaseId, if (databaseId == 0) { databaseId = TRI_NewTickServer(); } - + std::string dirname; res = CreateDatabaseDirectory(server, databaseId, name, defaults, dirname); @@ -1599,16 +1607,19 @@ int TRI_CreateDatabaseServer(TRI_server_t* server, TRI_voc_tick_t databaseId, return res; } - std::string const 
path(arangodb::basics::FileUtils::buildFilename(server->_databasePath, dirname.c_str())); + std::string const path(arangodb::basics::FileUtils::buildFilename( + server->_databasePath, dirname.c_str())); if (arangodb::wal::LogfileManager::instance()->isInRecovery()) { - LOG(TRACE) << "creating database '" << name << "', directory '" << path.c_str() << "'"; + LOG(TRACE) << "creating database '" << name << "', directory '" + << path.c_str() << "'"; } else { - LOG(INFO) << "creating database '" << name << "', directory '" << path.c_str() << "'"; + LOG(INFO) << "creating database '" << name << "', directory '" + << path.c_str() << "'"; } - vocbase = - TRI_OpenVocBase(server, path.c_str(), databaseId, name, defaults, false, false); + vocbase = TRI_OpenVocBase(server, path.c_str(), databaseId, name, defaults, + false, false); if (vocbase == nullptr) { // grab last error @@ -1619,7 +1630,8 @@ int TRI_CreateDatabaseServer(TRI_server_t* server, TRI_voc_tick_t databaseId, res = TRI_ERROR_INTERNAL; } - LOG(ERR) << "could not create database '" << name << "': " << TRI_errno_string(res); + LOG(ERR) << "could not create database '" << name + << "': " << TRI_errno_string(res); return res; } @@ -1647,12 +1659,15 @@ int TRI_CreateDatabaseServer(TRI_server_t* server, TRI_voc_tick_t databaseId, // start the replication applier if (vocbase->_replicationApplier->_configuration._autoStart) { if (server->_disableReplicationAppliers) { - LOG(INFO) << "replication applier explicitly deactivated for database '" << name << "'"; + LOG(INFO) + << "replication applier explicitly deactivated for database '" + << name << "'"; } else { res = vocbase->_replicationApplier->start(0, false, 0); if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "unable to start replication applier for database '" << name << "': " << TRI_errno_string(res); + LOG(WARN) << "unable to start replication applier for database '" + << name << "': " << TRI_errno_string(res); } } } @@ -1694,7 +1709,7 @@ int 
TRI_CreateDatabaseServer(TRI_server_t* server, TRI_voc_tick_t databaseId, } //////////////////////////////////////////////////////////////////////////////// -/// @brief activates or deactivates deadlock detection in all existing +/// @brief activates or deactivates deadlock detection in all existing /// databases //////////////////////////////////////////////////////////////////////////////// @@ -1831,9 +1846,11 @@ int TRI_DropDatabaseServer(TRI_server_t* server, char const* name, if (TRI_DropVocBase(vocbase)) { if (arangodb::wal::LogfileManager::instance()->isInRecovery()) { - LOG(TRACE) << "dropping database '" << vocbase->_name << "', directory '" << vocbase->_path << "'"; + LOG(TRACE) << "dropping database '" << vocbase->_name << "', directory '" + << vocbase->_path << "'"; } else { - LOG(INFO) << "dropping database '" << vocbase->_name << "', directory '" << vocbase->_path << "'"; + LOG(INFO) << "dropping database '" << vocbase->_name << "', directory '" + << vocbase->_path << "'"; } res = SaveDatabaseParameters(vocbase->_id, vocbase->_name, true, diff --git a/arangod/VocBase/vocbase.cpp b/arangod/VocBase/vocbase.cpp index 298b8bbbf6..9d4816a4be 100644 --- a/arangod/VocBase/vocbase.cpp +++ b/arangod/VocBase/vocbase.cpp @@ -61,6 +61,8 @@ #include #include +using namespace arangodb::basics; + //////////////////////////////////////////////////////////////////////////////// /// @brief sleep interval used when polling for a loading collection's status //////////////////////////////////////////////////////////////////////////////// @@ -215,7 +217,8 @@ static int WriteDropCollectionMarker(TRI_vocbase_t* vocbase, } if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "could not save collection drop marker in log: " << TRI_errno_string(res); + LOG(WARN) << "could not save collection drop marker in log: " + << TRI_errno_string(res); } return res; @@ -251,7 +254,7 @@ static bool UnregisterCollection(TRI_vocbase_t* vocbase, // post-condition 
TRI_ASSERT(vocbase->_collectionsByName._nrUsed == - vocbase->_collectionsById._nrUsed); + vocbase->_collectionsById._nrUsed); return true; } @@ -304,7 +307,8 @@ static bool UnloadCollectionCallback(TRI_collection_t* col, void* data) { if (res != TRI_ERROR_NO_ERROR) { std::string const colName(collection->name()); - LOG(ERR) << "failed to close collection '" << colName.c_str() << "': " << TRI_last_error(); + LOG(ERR) << "failed to close collection '" << colName.c_str() + << "': " << TRI_last_error(); collection->_status = TRI_VOC_COL_STATUS_CORRUPTED; @@ -342,8 +346,8 @@ static bool DropCollectionCallback(TRI_collection_t* col, void* data) { res = regcomp(&re, "^(.*)\\\\collection-([0-9][0-9]*)(-[0-9]+)?$", REG_ICASE | REG_EXTENDED); #else - res = - regcomp(&re, "^(.*)/collection-([0-9][0-9]*)(-[0-9]+)?$", REG_ICASE | REG_EXTENDED); + res = regcomp(&re, "^(.*)/collection-([0-9][0-9]*)(-[0-9]+)?$", + REG_ICASE | REG_EXTENDED); #endif if (res != 0) { @@ -373,7 +377,8 @@ static bool DropCollectionCallback(TRI_collection_t* col, void* data) { res = TRI_CloseDocumentCollection(document, false); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "failed to close collection '" << name.c_str() << "': " << TRI_last_error(); + LOG(ERR) << "failed to close collection '" << name.c_str() + << "': " << TRI_last_error(); TRI_WRITE_UNLOCK_STATUS_VOCBASE_COL(collection); @@ -452,23 +457,29 @@ static bool DropCollectionCallback(TRI_collection_t* col, void* data) { // perform the rename res = TRI_RenameFile(collection->pathc_str(), newFilename); - LOG(TRACE) << "renaming collection directory from '" << collection->pathc_str() << "' to '" << newFilename << "'"; + LOG(TRACE) << "renaming collection directory from '" + << collection->pathc_str() << "' to '" << newFilename << "'"; if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot rename dropped collection '" << name.c_str() << "' from '" << collection->pathc_str() << "' to '" << newFilename << "': " << TRI_errno_string(res); + LOG(ERR) 
<< "cannot rename dropped collection '" << name.c_str() + << "' from '" << collection->pathc_str() << "' to '" + << newFilename << "': " << TRI_errno_string(res); } else { - LOG(DEBUG) << "wiping dropped collection '" << name.c_str() << "' from disk"; + LOG(DEBUG) << "wiping dropped collection '" << name.c_str() + << "' from disk"; res = TRI_RemoveDirectory(newFilename); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "cannot wipe dropped collection '" << name.c_str() << "' from disk: " << TRI_errno_string(res); + LOG(ERR) << "cannot wipe dropped collection '" << name.c_str() + << "' from disk: " << TRI_errno_string(res); } } TRI_FreeString(TRI_CORE_MEM_ZONE, newFilename); } else { - LOG(ERR) << "cannot rename dropped collection '" << name.c_str() << "': unknown path '" << collection->pathc_str() << "'"; + LOG(ERR) << "cannot rename dropped collection '" << name.c_str() + << "': unknown path '" << collection->pathc_str() << "'"; } } @@ -499,7 +510,9 @@ static TRI_vocbase_col_t* AddCollection(TRI_vocbase_t* vocbase, if (found != nullptr) { LOG(ERR) << "duplicate entry for collection name '" << name << "'"; - LOG(ERR) << "collection id " << cid << " has same name as already added collection " << static_cast(found)->_cid; + LOG(ERR) << "collection id " << cid + << " has same name as already added collection " + << static_cast(found)->_cid; TRI_set_errno(TRI_ERROR_ARANGO_DUPLICATE_NAME); @@ -522,7 +535,8 @@ static TRI_vocbase_col_t* AddCollection(TRI_vocbase_t* vocbase, if (found != nullptr) { TRI_RemoveKeyAssociativePointer(&vocbase->_collectionsByName, name); - LOG(ERR) << "duplicate collection identifier " << collection->_cid << " for name '" << name << "'"; + LOG(ERR) << "duplicate collection identifier " << collection->_cid + << " for name '" << name << "'"; TRI_set_errno(TRI_ERROR_ARANGO_DUPLICATE_IDENTIFIER); @@ -539,7 +553,7 @@ static TRI_vocbase_col_t* AddCollection(TRI_vocbase_t* vocbase, } TRI_ASSERT(vocbase->_collectionsByName._nrUsed == - 
vocbase->_collectionsById._nrUsed); + vocbase->_collectionsById._nrUsed); // this needs the write lock on _collectionsLock // TODO: if the following goes wrong, we still have the collection added into @@ -721,7 +735,7 @@ static int RenameCollection(TRI_vocbase_t* vocbase, TRI_ASSERT(found == nullptr); TRI_ASSERT(vocbase->_collectionsByName._nrUsed == - vocbase->_collectionsById._nrUsed); + vocbase->_collectionsById._nrUsed); } // _colllectionsLock // to prevent caching returning now invalid old collection name in db's @@ -761,28 +775,17 @@ static bool StartupTickIterator(TRI_df_marker_t const* marker, void* data, static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, bool iterateMarkers) { - regmatch_t matches[3]; - regex_t re; - - int res = regcomp(&re, "^collection-([0-9][0-9]*)(-[0-9]+)?$", REG_EXTENDED | REG_NOSUB); - - if (res != 0) { - LOG(ERR) << "unable to compile regular expression"; - - return res; - } - std::vector<std::string> files = TRI_FilesDirectory(path); if (iterateMarkers) { - LOG(TRACE) << "scanning all collection markers in database '" << vocbase->_name; + LOG(TRACE) << "scanning all collection markers in database '" + << vocbase->_name; } for (auto const& name : files) { TRI_ASSERT(!name.empty()); - if (regexec(&re, name.c_str(), sizeof(matches) / sizeof(matches[0]), - matches, 0) != 0) { + if (!StringUtils::isPrefix(name, "collection-")) { // no match, ignore this file continue; } @@ -790,7 +793,8 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, char* filePtr = TRI_Concatenate2File(path, name.c_str()); if (filePtr == nullptr) { - LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT(); + LOG(FATAL) << "out of memory"; + FATAL_ERROR_EXIT(); } std::string file = filePtr; @@ -803,9 +807,8 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, // this can cause serious trouble so we will abort the server start if // we // encounter this situation - LOG(ERR) << "database subdirectory '" 
<< file.c_str() << "' is not writable for current user"; - - regfree(&re); + LOG(ERR) << "database subdirectory '" << file.c_str() + << "' is not writable for current user"; return TRI_set_errno(TRI_ERROR_ARANGO_DATADIR_NOT_WRITABLE); } @@ -823,7 +826,8 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, // we found a collection that is marked as deleted. // deleted collections should be removed on startup. this is the // default - LOG(DEBUG) << "collection '" << name.c_str() << "' was deleted, wiping it"; + LOG(DEBUG) << "collection '" << name.c_str() + << "' was deleted, wiping it"; res = TRI_RemoveDirectory(file.c_str()); @@ -838,25 +842,27 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, // collection is too "old" if (!isUpgrade) { - LOG(ERR) << "collection '" << info.namec_str() << "' has a too old version. Please start the server with the --upgrade option."; - - regfree(&re); + LOG(ERR) << "collection '" << info.namec_str() + << "' has a too old version. 
Please start the server " + "with the --upgrade option."; return TRI_set_errno(res); } else { if (info.version() < TRI_COL_VERSION_13) { - LOG(ERR) << "collection '" << info.namec_str() << "' is too old to be upgraded with this ArangoDB version."; + LOG(ERR) << "collection '" << info.namec_str() + << "' is too old to be upgraded with this ArangoDB " + "version."; res = TRI_ERROR_ARANGO_ILLEGAL_STATE; } else { - LOG(INFO) << "upgrading collection '" << info.namec_str() << "'"; + LOG(INFO) << "upgrading collection '" << info.namec_str() + << "'"; res = TRI_ERROR_NO_ERROR; } if (res == TRI_ERROR_NO_ERROR && info.version() < TRI_COL_VERSION_20) { - LOG(ERR) << "format of collection '" << info.namec_str() << "' is too old"; - - regfree(&re); + LOG(ERR) << "format of collection '" << info.namec_str() + << "' is too old"; return TRI_set_errno(res); } @@ -873,9 +879,8 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, } if (c == nullptr) { - LOG(ERR) << "failed to add document collection from '" << file.c_str() << "'"; - - regfree(&re); + LOG(ERR) << "failed to add document collection from '" + << file.c_str() << "'"; return TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_COLLECTION); } @@ -893,7 +898,8 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, TRI_UpdateTickServer(tick); } - LOG(DEBUG) << "added document collection '" << info.namec_str() << "' from '" << file.c_str() << "'"; + LOG(DEBUG) << "added document collection '" << info.namec_str() + << "' from '" << file.c_str() << "'"; } } catch (arangodb::basics::Exception const& e) { @@ -911,8 +917,9 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, TRI_Free(TRI_CORE_MEM_ZONE, tmpfile); res = e.code(); - LOG(ERR) << "cannot read collection info file in directory '" << file.c_str() << "': " << TRI_errno_string(res); - regfree(&re); + LOG(ERR) << "cannot read collection info file in directory '" + << file.c_str() << "': " << 
TRI_errno_string(res); + return TRI_set_errno(res); } } else { @@ -920,8 +927,6 @@ static int ScanPath(TRI_vocbase_t* vocbase, char const* path, bool isUpgrade, } } - regfree(&re); - return TRI_ERROR_NO_ERROR; } @@ -1076,7 +1081,8 @@ static int LoadCollectionVocBase(TRI_vocbase_t* vocbase, } std::string const colName(collection->name()); - LOG(ERR) << "unknown collection status " << collection->_status << " for '" << colName.c_str() << "'"; + LOG(ERR) << "unknown collection status " << collection->_status << " for '" + << colName.c_str() << "'"; TRI_WRITE_UNLOCK_STATUS_VOCBASE_COL(collection); return TRI_set_errno(TRI_ERROR_INTERNAL); @@ -1280,13 +1286,6 @@ std::shared_ptr TRI_vocbase_col_t::toVelocyPack( void TRI_vocbase_col_t::toVelocyPackIndexes(VPackBuilder& builder, TRI_voc_tick_t maxTick) { TRI_ASSERT(!builder.isClosed()); - regex_t re; - - if (regcomp(&re, "^index-[0-9][0-9]*\\.json$", REG_EXTENDED | REG_NOSUB) != - 0) { - LOG(ERR) << "unable to compile regular expression"; - THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); - } std::vector files = TRI_FilesDirectory(_path.c_str()); @@ -1294,7 +1293,8 @@ void TRI_vocbase_col_t::toVelocyPackIndexes(VPackBuilder& builder, std::sort(files.begin(), files.end(), FilenameStringComparator); for (auto const& file : files) { - if (regexec(&re, file.c_str(), (size_t)0, nullptr, 0) == 0) { + if (StringUtils::isPrefix(file, "index-") && + StringUtils::isSuffix(file, ".json")) { // TODO: fix memleak char* fqn = TRI_Concatenate2File(_path.c_str(), file.c_str()); std::string path = std::string(fqn, strlen(fqn)); @@ -1328,8 +1328,6 @@ void TRI_vocbase_col_t::toVelocyPackIndexes(VPackBuilder& builder, } } } - - regfree(&re); } std::shared_ptr TRI_vocbase_col_t::toVelocyPackIndexes( @@ -1418,7 +1416,9 @@ TRI_vocbase_t* TRI_OpenVocBase(TRI_server_t* server, char const* path, vocbase->_replicationApplier = TRI_CreateReplicationApplier(server, vocbase); if (vocbase->_replicationApplier == nullptr) { - LOG(FATAL) << "initializing 
replication applier for database '" << vocbase->_name << "' failed: " << TRI_last_error(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "initializing replication applier for database '" + << vocbase->_name << "' failed: " << TRI_last_error(); + FATAL_ERROR_EXIT(); } // we are done @@ -1825,7 +1825,8 @@ TRI_vocbase_col_t* TRI_CreateCollectionVocBase( res = TRI_ERROR_INTERNAL; } - LOG(WARN) << "could not save collection create marker in log: " << TRI_errno_string(res); + LOG(WARN) << "could not save collection create marker in log: " + << TRI_errno_string(res); // TODO: what to do here? return collection; @@ -2032,7 +2033,8 @@ int TRI_RenameCollectionVocBase(TRI_vocbase_t* vocbase, } if (res != TRI_ERROR_NO_ERROR) { - LOG(WARN) << "could not save collection rename marker in log: " << TRI_errno_string(res); + LOG(WARN) << "could not save collection rename marker in log: " + << TRI_errno_string(res); } } diff --git a/arangod/Wal/LogfileManager.cpp b/arangod/Wal/LogfileManager.cpp index 537419581b..da973773e5 100644 --- a/arangod/Wal/LogfileManager.cpp +++ b/arangod/Wal/LogfileManager.cpp @@ -22,15 +22,16 @@ //////////////////////////////////////////////////////////////////////////////// #include "LogfileManager.h" -#include "Basics/files.h" -#include "Basics/hashes.h" -#include "Basics/Logger.h" + #include "Basics/Exceptions.h" #include "Basics/FileUtils.h" +#include "Basics/Logger.h" #include "Basics/MutexLocker.h" #include "Basics/ReadLocker.h" #include "Basics/StringUtils.h" #include "Basics/WriteLocker.h" +#include "Basics/files.h" +#include "Basics/hashes.h" #include "Basics/memory-map.h" #include "VocBase/server.h" #include "Wal/AllocatorThread.h" @@ -40,6 +41,7 @@ #include "Wal/Slots.h" #include "Wal/SynchronizerThread.h" +using namespace arangodb::basics; using namespace arangodb::wal; //////////////////////////////////////////////////////////////////////////////// @@ -137,19 +139,10 @@ LogfileManager::LogfileManager(TRI_server_t* server, std::string* databasePath) 
_droppedDatabases(), _idLock(), _writeThrottled(0), - _filenameRegex(), _shutdown(0) { LOG(TRACE) << "creating WAL logfile manager"; TRI_ASSERT(!_allowWrites); - int res = - regcomp(&_filenameRegex, "^logfile-([0-9][0-9]*)\\.db$", REG_EXTENDED); - - if (res != 0) { - THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, - "could not compile regex"); - } - _transactions.reserve(32); _failedTransactions.reserve(32); } @@ -163,8 +156,6 @@ LogfileManager::~LogfileManager() { stop(); - regfree(&_filenameRegex); - for (auto& it : _barriers) { delete it.second; } @@ -204,8 +195,9 @@ void LogfileManager::initialize(std::string* path, TRI_server_t* server) { /// {@inheritDoc} //////////////////////////////////////////////////////////////////////////////// -void LogfileManager::setupOptions(std::map< - std::string, arangodb::basics::ProgramOptionsDescription>& options) { +void LogfileManager::setupOptions( + std::map& + options) { options["Write-ahead log options:help-wal"]( "wal.allow-oversize-entries", &_allowOversizeEntries, "allow entries that are bigger than --wal.logfile-size")( @@ -256,9 +248,12 @@ bool LogfileManager::prepare() { systemErrorStr); if (res) { - LOG(INFO) << "created database directory '" << _directory.c_str() << "'."; + LOG(INFO) << "created database directory '" << _directory.c_str() + << "'."; } else { - LOG(FATAL) << "unable to create database directory: " << systemErrorStr.c_str(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "unable to create database directory: " + << systemErrorStr.c_str(); + FATAL_ERROR_EXIT(); } } @@ -271,7 +266,9 @@ bool LogfileManager::prepare() { } if (_directory.empty()) { - LOG(FATAL) << "no directory specified for WAL logfiles. Please use the --wal.directory option"; FATAL_ERROR_EXIT(); + LOG(FATAL) << "no directory specified for WAL logfiles. 
Please use the " + "--wal.directory option"; + FATAL_ERROR_EXIT(); } if (_directory[_directory.size() - 1] != TRI_DIR_SEPARATOR_CHAR) { @@ -281,23 +278,34 @@ bool LogfileManager::prepare() { if (_filesize < MinFileSize()) { // minimum filesize per logfile - LOG(FATAL) << "invalid value for --wal.logfile-size. Please use a value of at least " << MinFileSize(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "invalid value for --wal.logfile-size. Please use a value of " + "at least " + << MinFileSize(); + FATAL_ERROR_EXIT(); } _filesize = (uint32_t)(((_filesize + PageSize - 1) / PageSize) * PageSize); if (_numberOfSlots < MinSlots() || _numberOfSlots > MaxSlots()) { // invalid number of slots - LOG(FATAL) << "invalid value for --wal.slots. Please use a value between " << MinSlots() << " and " << MaxSlots(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "invalid value for --wal.slots. Please use a value between " + << MinSlots() << " and " << MaxSlots(); + FATAL_ERROR_EXIT(); } if (_throttleWhenPending > 0 && _throttleWhenPending < MinThrottleWhenPending()) { - LOG(FATAL) << "invalid value for --wal.throttle-when-pending. Please use a value of at least " << MinThrottleWhenPending(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "invalid value for --wal.throttle-when-pending. Please use a " + "value of at least " + << MinThrottleWhenPending(); + FATAL_ERROR_EXIT(); } if (_syncInterval < MinSyncInterval()) { - LOG(FATAL) << "invalid value for --wal.sync-interval. Please use a value of at least " << MinSyncInterval(); FATAL_ERROR_EXIT(); + LOG(FATAL) << "invalid value for --wal.sync-interval. 
Please use a value " + "of at least " + << MinSyncInterval(); + FATAL_ERROR_EXIT(); } // sync interval is specified in milliseconds by the user, but internally @@ -324,7 +332,8 @@ bool LogfileManager::start() { int res = inventory(); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not create WAL logfile inventory: " << TRI_errno_string(res); + LOG(ERR) << "could not create WAL logfile inventory: " + << TRI_errno_string(res); return false; } @@ -337,7 +346,8 @@ bool LogfileManager::start() { res = readShutdownInfo(); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not open shutdown file '" << shutdownFile.c_str() << "': " << TRI_errno_string(res); + LOG(ERR) << "could not open shutdown file '" << shutdownFile.c_str() + << "': " << TRI_errno_string(res); return false; } } else { @@ -353,7 +363,10 @@ bool LogfileManager::start() { started = true; - LOG(TRACE) << "WAL logfile manager configuration: historic logfiles: " << _historicLogfiles << ", reserve logfiles: " << _reserveLogfiles << ", filesize: " << _filesize << ", sync interval: " << _syncInterval; + LOG(TRACE) << "WAL logfile manager configuration: historic logfiles: " + << _historicLogfiles << ", reserve logfiles: " << _reserveLogfiles + << ", filesize: " << _filesize + << ", sync interval: " << _syncInterval; return true; } @@ -423,14 +436,16 @@ bool LogfileManager::open() { res = startAllocatorThread(); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not start WAL allocator thread: " << TRI_errno_string(res); + LOG(ERR) << "could not start WAL allocator thread: " + << TRI_errno_string(res); return false; } res = startSynchronizerThread(); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not start WAL synchronizer thread: " << TRI_errno_string(res); + LOG(ERR) << "could not start WAL synchronizer thread: " + << TRI_errno_string(res); return false; } @@ -463,7 +478,8 @@ bool LogfileManager::open() { res = startCollectorThread(); if (res != TRI_ERROR_NO_ERROR) { - LOG(ERR) << "could not 
start WAL collector thread: " << TRI_errno_string(res); + LOG(ERR) << "could not start WAL collector thread: " + << TRI_errno_string(res); return false; } @@ -898,12 +914,15 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector, return TRI_ERROR_NO_ERROR; } - LOG(TRACE) << "about to flush active WAL logfile. currentLogfileId: " << lastOpenLogfileId << ", waitForSync: " << waitForSync << ", waitForCollector: " << waitForCollector; + LOG(TRACE) << "about to flush active WAL logfile. currentLogfileId: " + << lastOpenLogfileId << ", waitForSync: " << waitForSync + << ", waitForCollector: " << waitForCollector; int res = _slots->flush(waitForSync); if (res != TRI_ERROR_NO_ERROR && res != TRI_ERROR_ARANGO_DATAFILE_EMPTY) { - LOG(ERR) << "unexpected error in WAL flush request: " << TRI_errno_string(res); + LOG(ERR) << "unexpected error in WAL flush request: " + << TRI_errno_string(res); return res; } @@ -915,7 +934,8 @@ int LogfileManager::flush(bool waitForSync, bool waitForCollector, if (res == TRI_ERROR_NO_ERROR) { // we need to wait for the collector... 
- // LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << // (unsigned long long) lastOpenLogfileId; + // LOG(TRACE) << "entering waitForCollector with lastOpenLogfileId " << // + // (unsigned long long) lastOpenLogfileId; res = this->waitForCollector(lastOpenLogfileId, maxWaitTime); } else if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) { // current logfile is empty and cannot be collected @@ -1126,24 +1146,25 @@ void LogfileManager::collectLogfileBarriers() { auto logfileBarrier = (*it).second; if (logfileBarrier->expires <= now) { - LOG_TOPIC(TRACE, Logger::REPLICATION) << "garbage-collecting expired WAL logfile barrier " << logfileBarrier->id; + LOG_TOPIC(TRACE, Logger::REPLICATION) + << "garbage-collecting expired WAL logfile barrier " + << logfileBarrier->id; it = _barriers.erase(it); delete logfileBarrier; - } - else { + } else { ++it; } } } - + //////////////////////////////////////////////////////////////////////////////// /// @brief returns a list of all logfile barrier ids //////////////////////////////////////////////////////////////////////////////// std::vector LogfileManager::getLogfileBarriers() { std::vector result; - + { READ_LOCKER(barrierLock, _barriersLock); result.reserve(_barriers.size()); @@ -1170,11 +1191,12 @@ bool LogfileManager::removeLogfileBarrier(TRI_voc_tick_t id) { } auto logfileBarrier = (*it).second; - LOG_TOPIC(DEBUG, Logger::REPLICATION) << "removing WAL logfile barrier " << logfileBarrier->id; + LOG_TOPIC(DEBUG, Logger::REPLICATION) << "removing WAL logfile barrier " + << logfileBarrier->id; _barriers.erase(it); delete logfileBarrier; - + return true; } @@ -1182,12 +1204,15 @@ bool LogfileManager::removeLogfileBarrier(TRI_voc_tick_t id) { /// @brief adds a barrier that prevents removal of logfiles //////////////////////////////////////////////////////////////////////////////// -TRI_voc_tick_t LogfileManager::addLogfileBarrier(TRI_voc_tick_t minTick, double ttl) { +TRI_voc_tick_t 
LogfileManager::addLogfileBarrier(TRI_voc_tick_t minTick, + double ttl) { TRI_voc_tick_t id = TRI_NewTickServer(); double expires = TRI_microtime() + ttl; auto logfileBarrier = std::make_unique(id, expires, minTick); - LOG_TOPIC(DEBUG, Logger::REPLICATION) << "adding WAL logfile barrier " << logfileBarrier->id << ", minTick: " << minTick; + LOG_TOPIC(DEBUG, Logger::REPLICATION) << "adding WAL logfile barrier " + << logfileBarrier->id + << ", minTick: " << minTick; { WRITE_LOCKER(barrierLock, _barriersLock); @@ -1203,7 +1228,8 @@ TRI_voc_tick_t LogfileManager::addLogfileBarrier(TRI_voc_tick_t minTick, double /// @brief extend the lifetime of a logfile barrier //////////////////////////////////////////////////////////////////////////////// -bool LogfileManager::extendLogfileBarrier(TRI_voc_tick_t id, double ttl, TRI_voc_tick_t tick) { +bool LogfileManager::extendLogfileBarrier(TRI_voc_tick_t id, double ttl, + TRI_voc_tick_t tick) { WRITE_LOCKER(barrierLock, _barriersLock); auto it = _barriers.find(id); @@ -1211,7 +1237,7 @@ bool LogfileManager::extendLogfileBarrier(TRI_voc_tick_t id, double ttl, TRI_voc if (it == _barriers.end()) { return false; } - + auto logfileBarrier = (*it).second; logfileBarrier->expires = TRI_microtime() + ttl; @@ -1219,8 +1245,10 @@ bool LogfileManager::extendLogfileBarrier(TRI_voc_tick_t id, double ttl, TRI_voc // patch tick logfileBarrier->minTick = tick; } - - LOG_TOPIC(TRACE, Logger::REPLICATION) << "extending WAL logfile barrier " << logfileBarrier->id << ", minTick: " << logfileBarrier->minTick; + + LOG_TOPIC(TRACE, Logger::REPLICATION) + << "extending WAL logfile barrier " << logfileBarrier->id + << ", minTick: " << logfileBarrier->minTick; return true; } @@ -1236,7 +1264,9 @@ TRI_voc_tick_t LogfileManager::getMinBarrierTick() { for (auto const& it : _barriers) { auto logfileBarrier = it.second; - LOG_TOPIC(TRACE, Logger::REPLICATION) << "server has WAL logfile barrier " << logfileBarrier->id << ", minTick: " << 
logfileBarrier->minTick; + LOG_TOPIC(TRACE, Logger::REPLICATION) + << "server has WAL logfile barrier " << logfileBarrier->id + << ", minTick: " << logfileBarrier->minTick; if (value == 0 || value < logfileBarrier->minTick) { value = logfileBarrier->minTick; @@ -1436,7 +1466,8 @@ int LogfileManager::getWriteableLogfile(uint32_t size, } TRI_ASSERT(result == nullptr); - LOG(WARN) << "unable to acquire writeable WAL logfile after " << (MaxIterations * SleepTime) / 1000 << " ms"; + LOG(WARN) << "unable to acquire writeable WAL logfile after " + << (MaxIterations * SleepTime) / 1000 << " ms"; return TRI_ERROR_LOCK_TIMEOUT; } @@ -1529,10 +1560,11 @@ Logfile* LogfileManager::getRemovableLogfile() { continue; } - if (logfile->id() <= minId && - logfile->canBeRemoved() && - (minBarrierTick == 0 || (logfile->df()->_tickMin < minBarrierTick && logfile->df()->_tickMax < minBarrierTick))) { - // only check those logfiles that are outside the ranges specified by barriers + if (logfile->id() <= minId && logfile->canBeRemoved() && + (minBarrierTick == 0 || (logfile->df()->_tickMin < minBarrierTick && + logfile->df()->_tickMax < minBarrierTick))) { + // only check those logfiles that are outside the ranges specified by + // barriers if (first == nullptr) { // note the oldest of the logfiles (_logfiles is a map, thus sorted) @@ -1729,7 +1761,8 @@ void LogfileManager::removeLogfile(Logfile* logfile) { // now physically remove the file if (!basics::FileUtils::remove(filename, &res)) { - LOG(ERR) << "unable to remove logfile '" << filename.c_str() << "': " << TRI_errno_string(res); + LOG(ERR) << "unable to remove logfile '" << filename.c_str() + << "': " << TRI_errno_string(res); } } @@ -1746,7 +1779,8 @@ int LogfileManager::waitForCollector(Logfile::IdType logfileId, // if specified, wait for a shorter period of time maxIterations = static_cast(maxWaitTime * 1000000.0 / (double)SingleWaitPeriod); - LOG(TRACE) << "will wait for max. 
" << maxWaitTime << " seconds for collector to finish"; + LOG(TRACE) << "will wait for max. " << maxWaitTime + << " seconds for collector to finish"; } LOG(TRACE) << "waiting for collector thread to collect logfile " << logfileId; @@ -1761,7 +1795,8 @@ int LogfileManager::waitForCollector(Logfile::IdType logfileId, int res = _collectorThread->waitForResult(SingleWaitPeriod); - // LOG(TRACE) << "still waiting for collector. logfileId: " << logfileId << " lastCollected: + // LOG(TRACE) << "still waiting for collector. logfileId: " << logfileId << + // " lastCollected: // " << // _lastCollectedId << ", result: " << res; if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) { @@ -1792,9 +1827,12 @@ int LogfileManager::runRecovery() { } if (_ignoreRecoveryErrors) { - LOG(INFO) << "running WAL recovery (" << _recoverState->logfilesToProcess.size() << " logfiles), ignoring recovery errors"; + LOG(INFO) << "running WAL recovery (" + << _recoverState->logfilesToProcess.size() + << " logfiles), ignoring recovery errors"; } else { - LOG(INFO) << "running WAL recovery (" << _recoverState->logfilesToProcess.size() << " logfiles)"; + LOG(INFO) << "running WAL recovery (" + << _recoverState->logfilesToProcess.size() << " logfiles)"; } // now iterate over all logfiles that we found during recovery @@ -1889,7 +1927,9 @@ int LogfileManager::readShutdownInfo() { _lastCollectedId = static_cast(lastCollectedId); _lastSealedId = static_cast(lastSealedId); - LOG(TRACE) << "initial values for WAL logfile manager: tick: " << lastTick << ", lastCollected: " << _lastCollectedId.load() << ", lastSealed: " << _lastSealedId.load(); + LOG(TRACE) << "initial values for WAL logfile manager: tick: " << lastTick + << ", lastCollected: " << _lastCollectedId.load() + << ", lastSealed: " << _lastSealedId.load(); } return TRI_ERROR_NO_ERROR; @@ -2106,17 +2146,16 @@ int LogfileManager::inventory() { std::vector files = basics::FileUtils::listFiles(_directory); for (auto it = files.begin(); it 
!= files.end(); ++it) { - regmatch_t matches[2]; std::string const file = (*it); - char const* s = file.c_str(); - if (regexec(&_filenameRegex, s, sizeof(matches) / sizeof(matches[1]), - matches, 0) == 0) { - Logfile::IdType const id = basics::StringUtils::uint64( - s + matches[1].rm_so, matches[1].rm_eo - matches[1].rm_so); + if (StringUtils::isPrefix(file, "logfile-") && + StringUtils::isSuffix(file, ".db")) { + Logfile::IdType const id = + basics::StringUtils::uint64(file.substr(8, file.size() - 8 - 3)); if (id == 0) { - LOG(WARN) << "encountered invalid id for logfile '" << file.c_str() << "'. ids must be > 0"; + LOG(WARN) << "encountered invalid id for logfile '" << file + << "'. ids must be > 0"; } else { // update global tick TRI_UpdateTickServer(static_cast(id)); @@ -2146,7 +2185,9 @@ int LogfileManager::inspectLogfiles() { if (logfile != nullptr) { std::string const logfileName = logfile->filename(); - LOG(DEBUG) << "logfile " << logfile->id() << ", filename '" << logfileName.c_str() << "', status " << logfile->statusText().c_str(); + LOG(DEBUG) << "logfile " << logfile->id() << ", filename '" + << logfileName.c_str() << "', status " + << logfile->statusText().c_str(); } } #endif @@ -2191,17 +2232,22 @@ int LogfileManager::inspectLogfiles() { _recoverState->logfilesToProcess.push_back(logfile); } - LOG(TRACE) << "inspecting logfile " << logfile->id() << " (" << logfile->statusText().c_str() << ")"; + LOG(TRACE) << "inspecting logfile " << logfile->id() << " (" + << logfile->statusText().c_str() << ")"; // update the tick statistics if (!TRI_IterateDatafile(logfile->df(), &RecoverState::InitialScanMarker, static_cast(_recoverState))) { std::string const logfileName = logfile->filename(); - LOG(WARN) << "WAL inspection failed when scanning logfile '" << logfileName.c_str() << "'"; + LOG(WARN) << "WAL inspection failed when scanning logfile '" + << logfileName.c_str() << "'"; return TRI_ERROR_ARANGO_RECOVERY; } - LOG(TRACE) << "inspected logfile " << 
logfile->id() << " (" << logfile->statusText().c_str() << "), tickMin: " << logfile->df()->_tickMin << ", tickMax: " << logfile->df()->_tickMax; + LOG(TRACE) << "inspected logfile " << logfile->id() << " (" + << logfile->statusText().c_str() + << "), tickMin: " << logfile->df()->_tickMin + << ", tickMax: " << logfile->df()->_tickMax; if (logfile->status() == Logfile::StatusType::SEALED) { // If it is sealed, switch to random access: @@ -2241,7 +2287,8 @@ int LogfileManager::createReserveLogfile(uint32_t size) { Logfile::IdType const id = nextId(); std::string const filename = logfileName(id); - LOG(TRACE) << "creating empty logfile '" << filename.c_str() << "' with size " << size; + LOG(TRACE) << "creating empty logfile '" << filename.c_str() << "' with size " + << size; uint32_t realsize; if (size > 0 && size > filesize()) { @@ -2291,11 +2338,13 @@ int LogfileManager::ensureDirectory() { } if (!basics::FileUtils::isDirectory(directory)) { - LOG(INFO) << "WAL directory '" << directory.c_str() << "' does not exist. creating it..."; + LOG(INFO) << "WAL directory '" << directory.c_str() + << "' does not exist. 
creating it..."; int res; if (!basics::FileUtils::createDirectory(directory, &res)) { - LOG(ERR) << "could not create WAL directory: '" << directory.c_str() << "': " << TRI_last_error(); + LOG(ERR) << "could not create WAL directory: '" << directory.c_str() + << "': " << TRI_last_error(); return TRI_ERROR_SYS_ERROR; } } diff --git a/arangod/Wal/LogfileManager.h b/arangod/Wal/LogfileManager.h index ac1b9b40ac..017d430365 100644 --- a/arangod/Wal/LogfileManager.h +++ b/arangod/Wal/LogfileManager.h @@ -25,6 +25,7 @@ #define ARANGOD_WAL_LOGFILE_MANAGER_H 1 #include "Basics/Common.h" + #include "Basics/Mutex.h" #include "Basics/ReadWriteLock.h" #include "ApplicationServer/ApplicationFeature.h" @@ -33,8 +34,6 @@ #include "Wal/Marker.h" #include "Wal/Slots.h" -#include - struct TRI_server_t; namespace arangodb { @@ -982,12 +981,6 @@ class LogfileManager : public rest::ApplicationFeature { int _writeThrottled; - ////////////////////////////////////////////////////////////////////////////// - /// @brief regex to match logfiles - ////////////////////////////////////////////////////////////////////////////// - - regex_t _filenameRegex; - ////////////////////////////////////////////////////////////////////////////// /// @brief whether or not we have been shut down already ////////////////////////////////////////////////////////////////////////////// diff --git a/lib/Basics/Logger.cpp b/lib/Basics/Logger.cpp index 3f361e7671..2d978b2330 100644 --- a/lib/Basics/Logger.cpp +++ b/lib/Basics/Logger.cpp @@ -218,7 +218,7 @@ LogAppenderFile::LogAppenderFile(std::string const& filename, bool fatal2stderr, THROW_ARANGO_EXCEPTION(TRI_ERROR_CANNOT_WRITE_FILE); } - + _fd.store(fd); } } @@ -232,7 +232,6 @@ void LogAppenderFile::logMessage(LogLevel level, std::string const& message, } if (level == LogLevel::FATAL && _fatal2stderr) { - // a fatal error. always print this on stderr, too. 
WriteStderr(level, message); @@ -457,10 +456,11 @@ std::string LogAppenderSyslog::details() { /// @brief build an appender object //////////////////////////////////////////////////////////////////////////////// -static LogAppender* buildAppender(std::string const& output, bool fatal2stderr, - std::string const& contentFilter, - std::unordered_set& existingAppenders) { - // first handle syslog-logging +static LogAppender* buildAppender( + std::string const& output, bool fatal2stderr, + std::string const& contentFilter, + std::unordered_set& existingAppenders) { +// first handle syslog-logging #ifdef TRI_ENABLE_SYSLOG if (StringUtils::isPrefix(output, "syslog://")) { auto s = StringUtils::split(output.substr(9), '/'); @@ -477,8 +477,8 @@ static LogAppender* buildAppender(std::string const& output, bool fatal2stderr, return new LogAppenderSyslog(s[0], s[1], contentFilter); } #endif - - // everything else must be file-based logging + + // everything else must be file-based logging std::string filename; if (output == "-" || output == "+") { filename = output; @@ -495,28 +495,28 @@ static LogAppender* buildAppender(std::string const& output, bool fatal2stderr, return true; } // treat stderr and stdout as one output filename - if (filename == "-" && + if (filename == "-" && existingAppenders.find("+") != existingAppenders.end()) { return true; } - if (filename == "+" && + if (filename == "+" && existingAppenders.find("-") != existingAppenders.end()) { return true; } return false; }; - + if (hasAppender(filename)) { // already have an appender for the same output return nullptr; } try { - std::unique_ptr appender(new LogAppenderFile(filename, fatal2stderr, contentFilter)); + std::unique_ptr appender( + new LogAppenderFile(filename, fatal2stderr, contentFilter)); existingAppenders.emplace(filename); return appender.release(); - } - catch (...) { + } catch (...) 
{ // cannot open file for logging // try falling back to stderr instead if (hasAppender("-")) { @@ -693,8 +693,7 @@ static void QueueMessage(char const* function, char const* file, long int line, // now either queue or output the message if (ThreadedLogging.load(std::memory_order_relaxed)) { - auto msg = std::make_unique(level, topicId, - out.str(), offset); + auto msg = std::make_unique(level, topicId, out.str(), offset); try { MessageQueue.push(msg.get()); @@ -780,6 +779,7 @@ LoggerStream::~LoggerStream() { LogTopic Logger::COLLECTOR("collector"); LogTopic Logger::COMPACTOR("compactor"); +LogTopic Logger::DATAFILES("datafiles", LogLevel::INFO); LogTopic Logger::MMAP("mmap"); LogTopic Logger::PERFORMANCE("performance", LogLevel::FATAL); // suppress by default @@ -843,8 +843,9 @@ void Logger::addAppender(std::string const& definition, bool fatal2stderr, topic = it->second; } - - std::unique_ptr appender(buildAppender(output, f2s, contentFilter, existingAppenders)); + + std::unique_ptr appender( + buildAppender(output, f2s, contentFilter, existingAppenders)); if (appender == nullptr) { // cannot open appender or already have an appender for the channel diff --git a/lib/Basics/Logger.h b/lib/Basics/Logger.h index fefdaf302d..7fad86362c 100644 --- a/lib/Basics/Logger.h +++ b/lib/Basics/Logger.h @@ -169,9 +169,11 @@ namespace arangodb { //////////////////////////////////////////////////////////////////////////////// /// @brief LogLevel //////////////////////////////////////////////////////////////////////////////// + #ifdef TRI_UNDEF_ERR #undef ERR #endif + enum class LogLevel { DEFAULT = 0, FATAL = 1, @@ -276,6 +278,7 @@ class Logger { static LogTopic QUERIES; static LogTopic REPLICATION; static LogTopic REQUESTS; + static LogTopic DATAFILES; public: ////////////////////////////////////////////////////////////////////////////// diff --git a/lib/V8/v8-utils.cpp b/lib/V8/v8-utils.cpp index 704e8d4934..c3d7639010 100644 --- a/lib/V8/v8-utils.cpp +++ b/lib/V8/v8-utils.cpp 
@@ -28,7 +28,6 @@ #include "v8-utils.h" #include "v8-buffer.h" -#include #include #include @@ -228,14 +227,11 @@ static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path, bool execute, bool useGlobalContext) { v8::HandleScope scope(isolate); bool result; - regex_t re; LOG(TRACE) << "loading JavaScript directory: '" << path << "'"; std::vector files = TRI_FilesDirectory(path); - regcomp(&re, "^(.*)\\.js$", REG_ICASE | REG_EXTENDED); - result = true; for (auto const& filename : files) { @@ -243,7 +239,7 @@ static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path, bool ok; char* full; - if (regexec(&re, filename.c_str(), 0, 0, 0) != 0) { + if (!StringUtils::isSuffix(filename, ".js")) { continue; } @@ -263,7 +259,6 @@ static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path, } } } - regfree(&re); return result; } @@ -1612,7 +1607,7 @@ static void JS_Load(v8::FunctionCallbackInfo const& args) { result = TRI_ExecuteJavaScriptString(isolate, isolate->GetCurrentContext(), TRI_V8_PAIR_STRING(content, length), filename->ToString(), false); - + TRI_FreeString(TRI_UNKNOWN_MEM_ZONE, content); // restore old values for __dirname and __filename