--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1839,6 +1839,8 @@ static void load_elf_image(const char *image_name, int image_fd,
info->pt_dynamic_addr = 0;
#endif
+ mmap_lock();
+
/* Find the maximum size of the image and allocate an appropriate
amount of memory to handle that. */
loaddr = -1, hiaddr = 0;
@@ -1999,6 +2001,8 @@ static void load_elf_image(const char *image_name, int image_fd,
load_symbols(ehdr, image_fd, load_bias);
}
+ mmap_unlock();
+
close(image_fd);
return;
--- a/translate-all.c
+++ b/translate-all.c
@@ -453,6 +453,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
void **lp;
int i;
+ if (alloc) {
+ assert_memory_lock();
+ }
+
/* Level 1. Always allocated. */
lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
@@ -819,6 +823,8 @@ static TranslationBlock *tb_alloc(target_ulong pc)
{
TranslationBlock *tb;
+ assert_tb_lock();
+
if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
return NULL;
}
@@ -831,6 +837,8 @@ static TranslationBlock *tb_alloc(target_ulong pc)
/* Called with tb_lock held. */
void tb_free(TranslationBlock *tb)
{
+ assert_tb_lock();
+
/* In practice this is mostly used for single use temporary TB
Ignore the hard cases and just back up if this TB happens to
be the last one generated. */
@@ -1047,6 +1055,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
uint32_t h;
tb_page_addr_t phys_pc;
+ assert_tb_lock();
+
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
h = tb_hash_func(phys_pc, tb->pc, tb->flags);
@@ -1125,6 +1135,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
bool page_already_protected;
#endif
+ assert_memory_lock();
+
tb->page_addr[n] = page_addr;
p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
tb->page_next[n] = p->first_tb;
@@ -1181,6 +1193,8 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
{
uint32_t h;
+ assert_memory_lock();
+
/* add in the hash table */
h = tb_hash_func(phys_pc, tb->pc, tb->flags);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
@@ -1212,6 +1226,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
+ assert_memory_lock();
phys_pc = get_page_addr_code(env, pc);
if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
@@ -1339,6 +1354,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
*/
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
+ assert_memory_lock();
+
while (start < end) {
tb_invalidate_phys_page_range(start, end, 0);
start &= TARGET_PAGE_MASK;
@@ -1375,6 +1392,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
uint32_t current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */
+ assert_memory_lock();
+
p = page_find(start >> TARGET_PAGE_BITS);
if (!p) {
return;
@@ -1972,6 +1991,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
assert(start < end);
+ assert_memory_lock();
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
This adds calls to assert_memory_lock and assert_tb_lock to the public APIs
which are documented as needing the mmap_lock (user-mode) or tb_lock held.
The ELF loader now takes the mmap_lock around load_elf_image so the
assertions also hold while the initial image is mapped.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
 linux-user/elfload.c |  4 ++++
 translate-all.c      | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+)

-- 
2.7.4
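Note for context: the hunks above rely on assert_memory_lock()/assert_tb_lock()
being defined elsewhere. As a rough sketch of the intent only (not necessarily
the exact implementation in this series), a user-mode build could define them
along these lines, assuming a DEBUG_LOCKING switch, a have_mmap_lock()
predicate and a have_tb_lock counter maintained by tb_lock()/tb_unlock() --
all of which are assumptions here for illustration:

/* translate-all.c (sketch): debug-only lock assertions.
 * DEBUG_LOCKING, have_mmap_lock() and have_tb_lock are illustrative
 * names; g_assert() is the usual glib assertion (via qemu/osdep.h).
 */
#ifdef DEBUG_LOCKING
#define DEBUG_MEM_LOCKS 1
#define DEBUG_TB_LOCKS 1
#else
#define DEBUG_MEM_LOCKS 0
#define DEBUG_TB_LOCKS 0
#endif

#ifdef CONFIG_SOFTMMU
/* softmmu code generation currently runs under the BQL, so there is
 * nothing extra to check */
#define assert_memory_lock() do { } while (0)
#define assert_tb_lock() do { } while (0)
#else
static int have_tb_lock;   /* incremented by tb_lock(), decremented by
                              tb_unlock() */

#define assert_memory_lock() do {               \
        if (DEBUG_MEM_LOCKS) {                  \
            g_assert(have_mmap_lock());         \
        }                                       \
    } while (0)

#define assert_tb_lock() do {                   \
        if (DEBUG_TB_LOCKS) {                   \
            g_assert(have_tb_lock);             \
        }                                       \
    } while (0)
#endif

Compiling the assertions out unless DEBUG_LOCKING is defined keeps the
fast path free of overhead in normal builds while still catching callers
that reach these APIs without the documented lock held.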