J'ai une question sur la façon dont j'utilise les objets boost::mutex dans mon application Linux C++. J'ai une collection de macros de commodité pour effectuer diverses opérations mutex : par exemple, j'ai des macros qui indiquent si un mutex est verrouillé ou non par un thread, et s'il est déjà verrouillé ou non par le thread appelant, ainsi que plusieurs autres. Les macros qui ont besoin de savoir quel thread (le cas échéant) détient actuellement le verrou du mutex sont les suivantes. La question est donc : est-il sûr de lire directement le champ propriétaire (__owner) d'un mutex pthread ?
// Determine whether or not a boost mutex is already locked.
// An __owner of 0 means "no thread holds the mutex"; otherwise it holds the
// kernel TID (gettid) of the locking thread — glibc internal, not public API.
// NOTE(review): reading __owner while another thread locks/unlocks the mutex
// is formally a data race (undefined behavior under the C++ memory model),
// even if it appears to work on x86 — confirm before relying on this.
// BUG FIX: the macro parameter is now parenthesized so that arguments such
// as *pMutex expand correctly ((*pMutex).native_handle(), not *pMutex.native_handle()).
#define IsBoostMutexLocked(m) (((m).native_handle()->__data.__owner) != 0)
// Determine whether or not a boost mutex is already locked by the calling thread,
// by comparing __owner against this thread's kernel TID.
#define IsBoostMutexLockedByCallingThread(m) (((m).native_handle()->__data.__owner) == (syscall(SYS_gettid)))
Mais je commençais à me demander s'il est sûr ou non de lire directement le champ int __owner. Un thread ne pourrait-il pas lire le champ __owner pendant qu'un autre thread est occupé à verrouiller ou déverrouiller le mutex, c'est-à-dire pendant qu'il écrit dans ce champ ? J'ai donc conçu un test qui tenterait d'exposer toute vulnérabilité de type course de données (data race) et d'abandonner (abort) si une telle situation était détectée. Jusqu'à présent, j'ai fait tourner 100 threads qui, tous en même temps, verrouillent, déverrouillent et lisent le champ __owner d'un mutex global, pour des dizaines de millions d'itérations de boucle par thread, et pas une seule fois je n'ai lu une valeur __owner invalide. Ci-dessous, j'ai inclus l'intégralité du code de test. Franchement, je suis surpris de n'avoir jamais lu une mauvaise valeur __owner. Quelqu'un peut-il m'expliquer pourquoi il est (apparemment) sûr de lire directement le __owner d'un mutex même quand d'autres threads essaient de le verrouiller/déverrouiller ? Merci d'avance !
// The test mutex that every test thread locks/unlocks concurrently
boost::mutex g_TestMutex;
// The number of threads to launch for the test
#define NUM_THREADS_TO_LAUNCH 100
// The kernel thread IDs (gettid) of all test threads; slot i belongs to thread i
long int g_AllSpecialThreadsTIDs[NUM_THREADS_TO_LAUNCH];
// Whether or not each test thread has stored its thread ID and is ready to begin
std::atomic<bool> g_bEachTestThreadIsReadyToBegin[NUM_THREADS_TO_LAUNCH];
// Whether or not the test is ready to begin (set once by StartTest to release all threads)
std::atomic<bool> g_bTestReadyToBegin(false);
// A structure that encapsulates data to be passed to each test thread.
// Idiom: in C++ a plain struct declares the type name directly — no 'typedef struct' needed.
struct TestThreadDataStructure {
    long *pStoreTIDLoc;               // Where the thread stores its kernel TID
    std::atomic<bool> *pTIDStoredLoc; // Set to true once the TID has been stored
};
// Ensure that a test thread ID read from the mutex's __owner field is valid:
// it must be either 0 (nobody holds the mutex) or the kernel TID of one of
// the launched test threads. Aborts the whole process otherwise, since an
// unknown value means the unsynchronized read produced garbage.
void AssertIsValidTID(int iTID)
{
    // An owner of 0 means "mutex not locked by anyone" — always acceptable
    if (iTID == 0)
    {
        return;
    }
    // A non-zero owner must match one of the threads we launched
    for (int i = 0; i < NUM_THREADS_TO_LAUNCH; i++)
    {
        if (static_cast<int>(g_AllSpecialThreadsTIDs[i]) == iTID)
        {
            // Recognized test-thread TID — valid
            return;
        }
    }
    // Unrecognized owner value: the test has failed
    abort();
}
// Each test thread
void TestMutexTesterThread(void *pArg)
{
// Each mutex owner thread ID
int iOwner = 0;
// Unpack the incoming data structure
TestThreadDataStructure *pStruct = ((TestThreadDataStructure *)pArg);
long int *pStoreHere = pStruct->pStoreTIDLoc;
std::atomic<bool> *pTIDStoredLoc = pStruct->pTIDStoredLoc;
// Clean up
delete pStruct;
pStruct = NULL;
pArg = NULL;
// Get this thread ID
const long int lThisTID = syscall(SYS_gettid);
// Store this thread ID
(*pStoreHere) = lThisTID;
// Set that we have finished storing the thread ID
pTIDStoredLoc->store(true);
// While we are waiting for everything to be ready so that we can begin the test
while (true)
{
// If we are now ready to begin the test
if (g_bTestReadyToBegin.load())
{
// Stop waiting
break;
}
}
// The loop iteration count
uint64_t uCount = 0;
// For the life of the test, i.e. forever
while (true)
{
// Increment the count
uCount++;
// If we are about to go over the edge
if (uCount >= (UINT64_MAX - 1))
{
// Reset the count
uCount = 0;
}
// Every so often
if ((uCount % 500000) == 0)
{
// Print our progress
printf("Thread %05ld: uCount = %lu\n", lThisTID, uCount);
}
// Get the mutex owner's thread ID
iOwner = g_TestMutex.native_handle()->__data.__owner;
// Ensure that this is a valid thread ID
AssertIsValidTID(iOwner);
// Lock the mutex as part of the test
g_TestMutex.lock();
// Get the mutex owner's thread ID
iOwner = g_TestMutex.native_handle()->__data.__owner;
// Ensure that this is a valid thread ID
AssertIsValidTID(iOwner);
// Unlock the mutex as part of the test
g_TestMutex.unlock();
// Get the mutex owner's thread ID
iOwner = g_TestMutex.native_handle()->__data.__owner;
// Ensure that this is a valid thread ID
AssertIsValidTID(iOwner);
}
}
// Start the test
void StartTest()
{
// For each thread to launch
for (int i = 0; i < NUM_THREADS_TO_LAUNCH; i++)
{
// Initialize that we do not have a thread ID yet
g_AllSpecialThreadsTIDs[i] = 0;
g_bEachTestThreadIsReadyToBegin[i].store(false);
}
// For each thread to launch
for (int i = 0; i < NUM_THREADS_TO_LAUNCH; i++)
{
// Allocate a data structure with which to pass data to each thread
TestThreadDataStructure *pDataStruct = new TestThreadDataStructure;
// Store the location at which the thread should place its thread ID
pDataStruct->pStoreTIDLoc = ((long int *)((&(g_AllSpecialThreadsTIDs[i]))));
// Store the location of the atomic variable that each thread should set to true when it has finished storing its thread ID
pDataStruct->pTIDStoredLoc = ((std::atomic<bool> *)((&(g_bEachTestThreadIsReadyToBegin[i]))));
// The thread to return
boost::thread *pNewThread = NULL;
// Launch the new thread
try { pNewThread = new boost::thread(TestMutexTesterThread, pDataStruct); }
// Catch errors
catch (boost::thread_resource_error &ResourceError)
{
// Print this error
printf("boost::thread construction error: '%s'", ResourceError.what());
// This is a fatal error
abort();
}
// Clean up
delete pNewThread;
pNewThread = NULL;
}
// Whether or not all threads are ready to begin
bool bAllThreadsReadyToBegin = false;
// While we are waiting for all threads to be ready to begin
while (true)
{
// Reset to assuming all threads are ready to begin
bAllThreadsReadyToBegin = true;
// For each thread we launched
for (int i = 0; i < NUM_THREADS_TO_LAUNCH; i++)
{
// If this thread has not yet stored its thread ID
if (g_bEachTestThreadIsReadyToBegin[i].load() == false)
{
// We are not yet ready to begin
bAllThreadsReadyToBegin = false;
// Start over
break;
}
}
// If all threads are ready to begin
if (bAllThreadsReadyToBegin)
{
// We are done waiting
break;
}
}
// Atomically store that all threads are ready to begin and that the test should proceed
g_bTestReadyToBegin.store(true);
}
La question de savoir si un mutex est actuellement verrouillé est pratiquement insignifiante, car elle ne vous donne fondamentalement aucune information sur quoi que ce soit. – nwp
https://stackoverflow.com/q/21892934/841108 est une question très similaire. –
Sur quelle architecture utilisez-vous ceci - x86? Et combien de prises, et combien de cœurs? – Useless