Merge branch 'mt/threaded-grep-in-object-store'
Traditionally, we avoided threaded grep while searching in objects (as opposed to files in the working tree), as accesses to the object layer are not thread-safe. This limitation is being lifted.

* mt/threaded-grep-in-object-store:
  grep: use no. of cores as the default no. of threads
  grep: move driver pre-load out of critical section
  grep: re-enable threads in non-worktree case
  grep: protect packed_git [re-]initialization
  grep: allow submodule functions to run in parallel
  submodule-config: add skip_if_read option to repo_read_gitmodules()
  grep: replace grep_read_mutex by internal obj read lock
  object-store: allow threaded access to object reading
  replace-object: make replace operations thread-safe
  grep: fix racy calls in grep_objects()
  grep: fix race conditions at grep_submodule()
  grep: fix race conditions on userdiff calls
This commit is contained in:
@ -6,6 +6,7 @@
|
||||
#include "list.h"
|
||||
#include "sha1-array.h"
|
||||
#include "strbuf.h"
|
||||
#include "thread-utils.h"
|
||||
|
||||
struct object_directory {
|
||||
struct object_directory *next;
|
||||
@ -125,6 +126,8 @@ struct raw_object_store {
|
||||
* (see git-replace(1)).
|
||||
*/
|
||||
struct oidmap *replace_map;
|
||||
unsigned replace_map_initialized : 1;
|
||||
pthread_mutex_t replace_mutex; /* protect object replace functions */
|
||||
|
||||
struct commit_graph *commit_graph;
|
||||
unsigned commit_graph_attempted : 1; /* if loading has been attempted */
|
||||
@ -257,6 +260,40 @@ int has_loose_object_nonlocal(const struct object_id *);
|
||||
|
||||
void assert_oid_type(const struct object_id *oid, enum object_type expect);
|
||||
|
||||
/*
 * Enabling the object read lock allows multiple threads to safely call the
 * following functions in parallel: repo_read_object_file(), read_object_file(),
 * read_object_file_extended(), read_object_with_reference(), read_object(),
 * oid_object_info() and oid_object_info_extended().
 *
 * obj_read_lock() and obj_read_unlock() may also be used to protect other
 * sections which cannot execute in parallel with object reading. Since the used
 * lock is a recursive mutex, these sections can even contain calls to object
 * reading functions. However, beware that in these cases zlib inflation won't
 * be performed in parallel, losing performance.
 *
 * TODO: oid_object_info_extended()'s call stack has a recursive behavior. If
 * any of its callees end up calling it, this recursive call won't benefit from
 * parallel inflation.
 */
|
||||
void enable_obj_read_lock(void);
|
||||
void disable_obj_read_lock(void);
|
||||
|
||||
extern int obj_read_use_lock;
|
||||
extern pthread_mutex_t obj_read_mutex;
|
||||
|
||||
static inline void obj_read_lock(void)
|
||||
{
|
||||
if(obj_read_use_lock)
|
||||
pthread_mutex_lock(&obj_read_mutex);
|
||||
}
|
||||
|
||||
static inline void obj_read_unlock(void)
|
||||
{
|
||||
if(obj_read_use_lock)
|
||||
pthread_mutex_unlock(&obj_read_mutex);
|
||||
}
|
||||
|
||||
struct object_info {
|
||||
/* Request */
|
||||
enum object_type *typep;
|
||||
|
Reference in New Issue
Block a user