This adds notifiers for phys memory changes: a set of callbacks that
vhost can register and use to update the kernel accordingly. Down the
road, kvm code can be switched to use these as well, instead of calling
kvm code directly from exec.c as is done now.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
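For illustration only, not part of the patch: a minimal, hypothetical
client of the new interface could look like the sketch below. The
example_* names and the empty callback bodies are placeholders for
whatever vhost will actually do, and the sketch assumes a per-target
build so that target_phys_addr_t is defined; only CPUPhysMemoryClient,
cpu_register_phys_memory_client() and cpu_unregister_phys_memory_client()
come from this patch. Note that registering replays every existing
mapping through the set_memory() callback (via phys_page_for_each), so
a client sees the current memory map as soon as it registers.

/* Illustrative only: a hypothetical client of the new notifier API. */
#include "cpu-common.h"

static void example_set_memory(CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size,
                               ram_addr_t phys_offset)
{
    /* e.g. tell a kernel backend (vhost) about the new/changed region */
}

static int example_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                     target_phys_addr_t start_addr,
                                     target_phys_addr_t end_addr)
{
    /* merge the backend's dirty log into qemu's bitmap; 0 on success */
    return 0;
}

static int example_migration_log(CPUPhysMemoryClient *client, int enable)
{
    /* start/stop dirty logging in the backend; 0 on success */
    return 0;
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};

static void example_init(void)
{
    /* Registration replays all current mappings through set_memory()
     * and then keeps the client updated on every later change. */
    cpu_register_phys_memory_client(&example_client);
}
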
 cpu-common.h |  19 ++++++++++
 exec.c       | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 130 insertions(+), 3 deletions(-)

diff --git a/cpu-common.h b/cpu-common.h
index 5e59564..326513d 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -8,6 +8,7 @@
 #endif
 
 #include "bswap.h"
+#include "qemu-queue.h"
 
 /* address in the RAM (different from a physical address) */
 typedef unsigned long ram_addr_t;
@@ -62,6 +63,24 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
 void cpu_unregister_map_client(void *cookie);
 
+struct CPUPhysMemoryClient;
+typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
+struct CPUPhysMemoryClient {
+    void (*set_memory)(struct CPUPhysMemoryClient *client,
+                       target_phys_addr_t start_addr,
+                       ram_addr_t size,
+                       ram_addr_t phys_offset);
+    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
+                             target_phys_addr_t start_addr,
+                             target_phys_addr_t end_addr);
+    int (*migration_log)(struct CPUPhysMemoryClient *client,
+                         int enable);
+    QLIST_ENTRY(CPUPhysMemoryClient) list;
+};
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
 uint32_t ldl_phys(target_phys_addr_t addr);
diff --git a/exec.c b/exec.c
index 8f873ab..cbba15e 100644
--- a/exec.c
+++ b/exec.c
@@ -1640,6 +1640,101 @@ const CPULogItem cpu_log_items[] = {
     { 0, NULL, NULL },
 };
 
+#ifndef CONFIG_USER_ONLY
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+    = QLIST_HEAD_INITIALIZER(memory_client_list);
+
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+                                  ram_addr_t size,
+                                  ram_addr_t phys_offset)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        client->set_memory(client, start_addr, size, phys_offset);
+    }
+}
+
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+                                        target_phys_addr_t end)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->sync_dirty_bitmap(client, start, end);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static int cpu_notify_migration_log(int enable)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->migration_log(client, enable);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+                                         CPUPhysMemoryClient *client)
+{
+    PhysPageDesc *pd;
+    int l1, l2;
+
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        pd = phys_map[l1];
+        if (!pd) {
+            continue;
+        }
+        for (l2 = 0; l2 < L2_SIZE; ++l2) {
+            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+                continue;
+            }
+            client->set_memory(client, pd[l2].region_offset,
+                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
+        }
+    }
+}
+
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+    void **phys_map = (void **)l1_phys_map;
+    int l1;
+    if (!l1_phys_map) {
+        return;
+    }
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        if (phys_map[l1]) {
+            phys_page_for_each_in_l1_map(phys_map[l1], client);
+        }
+    }
+#else
+    if (!l1_phys_map) {
+        return;
+    }
+    phys_page_for_each_in_l1_map(l1_phys_map, client);
+#endif
+}
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_INSERT_HEAD(&memory_client_list, client, list);
+    phys_page_for_each(client);
+}
+
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_REMOVE(client, list);
+}
+#endif
+
 static int cmp1(const char *s1, int n, const char *s2)
 {
     if (strlen(s2) != n)
@@ -1899,10 +1994,16 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
+    int ret = 0;
+    in_migration = enable;
     if (kvm_enabled()) {
-        return kvm_set_migration_log(enable);
+        ret = kvm_set_migration_log(enable);
     }
-    return 0;
+    if (ret < 0) {
+        return ret;
+    }
+    ret = cpu_notify_migration_log(!!enable);
+    return ret;
 }
 
 int cpu_physical_memory_get_dirty_tracking(void)
@@ -1915,8 +2016,13 @@ int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 {
     int ret = 0;
 
-    if (kvm_enabled())
+    if (kvm_enabled()) {
         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    }
+    if (ret < 0) {
+        return ret;
+    }
+    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
     return ret;
 }
 
@@ -2331,6 +2437,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     if (kvm_enabled())
         kvm_set_phys_mem(start_addr, size, phys_offset);
 
+    cpu_notify_set_memory(start_addr, size, phys_offset);
+
     if (phys_offset == IO_MEM_UNASSIGNED) {
         region_offset = start_addr;
     }
--
1.6.6.144.g5c3af