@@ -24,23 +24,13 @@ static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
 static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
 {
-	struct kvm_coalesced_mmio_zone *zone;
-	int i;
-
-	/* is it in a batchable area ? */
-
-	for (i = 0; i < dev->nb_zones; i++) {
-		zone = &dev->zone[i];
-
-		/* (addr,len) is fully included in
-		 * (zone->addr, zone->size)
-		 */
+	/* is it in a batchable area ?
+	 * (addr,len) is fully included in
+	 * (zone->addr, zone->size)
+	 */
 
-		if (zone->addr <= addr &&
-		    addr + len <= zone->addr + zone->size)
-			return 1;
-	}
-	return 0;
+	return (dev->zone.addr <= addr &&
+		addr + len <= dev->zone.addr + dev->zone.size);
 }
 
 static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
@@ -73,10 +63,10 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	if (!coalesced_mmio_in_range(dev, addr, len))
 		return -EOPNOTSUPP;
 
-	spin_lock(&dev->lock);
+	spin_lock(&dev->kvm->ring_lock);
 
 	if (!coalesced_mmio_has_room(dev)) {
-		spin_unlock(&dev->lock);
+		spin_unlock(&dev->kvm->ring_lock);
 		return -EOPNOTSUPP;
 	}
 
@@ -87,14 +77,16 @@ static int coalesced_mmio_write(struct kvm_io_device *this,
 	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
 	smp_wmb();
 	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
-	spin_unlock(&dev->lock);
+	spin_unlock(&dev->kvm->ring_lock);
 	return 0;
 }
 
 static void coalesced_mmio_destructor(struct kvm_io_device *this)
 {
 	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
 
+	list_del(&dev->list);
+
 	kfree(dev);
 }
 
@@ -105,39 +97,25 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
 
 int kvm_coalesced_mmio_init(struct kvm *kvm)
 {
-	struct kvm_coalesced_mmio_dev *dev;
 	struct page *page;
 	int ret;
 
 	ret = -ENOMEM;
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page)
 		goto out_err;
-	kvm->coalesced_mmio_ring = page_address(page);
 
-	ret = -ENOMEM;
-	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
-	if (!dev)
-		goto out_free_page;
-	spin_lock_init(&dev->lock);
-	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
-	dev->kvm = kvm;
-	kvm->coalesced_mmio_dev = dev;
-
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-	if (ret < 0)
-		goto out_free_dev;
+	ret = 0;
+	kvm->coalesced_mmio_ring = page_address(page);
 
-	return ret;
+	/*
+	 * We're using this spinlock to sync access to the coalesced ring.
+	 * The list doesn't need its own lock since device registration and
+	 * unregistration should only happen when kvm->slots_lock is held.
+	 */
+	spin_lock_init(&kvm->ring_lock);
+	INIT_LIST_HEAD(&kvm->coalesced_zones);
 
-out_free_dev:
-	kvm->coalesced_mmio_dev = NULL;
-	kfree(dev);
-out_free_page:
-	kvm->coalesced_mmio_ring = NULL;
-	__free_page(page);
 out_err:
 	return ret;
 }
@@ -151,51 +129,49 @@ void kvm_coalesced_mmio_free(struct kvm *kvm)
 int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
 					 struct kvm_coalesced_mmio_zone *zone)
 {
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
+	int ret;
+	struct kvm_coalesced_mmio_dev *dev;
 
-	if (dev == NULL)
-		return -ENXIO;
+	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
+	dev->kvm = kvm;
+	dev->zone = *zone;
 
 	mutex_lock(&kvm->slots_lock);
-	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
-		mutex_unlock(&kvm->slots_lock);
-		return -ENOBUFS;
-	}
+	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+	if (ret < 0)
+		goto out_free_dev;
+	list_add_tail(&dev->list, &kvm->coalesced_zones);
+	mutex_unlock(&kvm->slots_lock);
 
-	dev->zone[dev->nb_zones] = *zone;
-	dev->nb_zones++;
+	return ret;
 
+out_free_dev:
 	mutex_unlock(&kvm->slots_lock);
+
+	kfree(dev);
+
+	if (dev == NULL)
+		return -ENXIO;
+
 	return 0;
 }
 
 int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
 					   struct kvm_coalesced_mmio_zone *zone)
 {
-	int i;
-	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
-	struct kvm_coalesced_mmio_zone *z;
-
-	if (dev == NULL)
-		return -ENXIO;
+	struct kvm_coalesced_mmio_dev *dev, *tmp;
 
 	mutex_lock(&kvm->slots_lock);
 
-	i = dev->nb_zones;
-	while (i) {
-		z = &dev->zone[i - 1];
-
-		/* unregister all zones
-		 * included in (zone->addr, zone->size)
-		 */
-
-		if (zone->addr <= z->addr &&
-		    z->addr + z->size <= zone->addr + zone->size) {
-			dev->nb_zones--;
-			*z = dev->zone[dev->nb_zones];
+	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
+		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+			kvm_iodevice_destructor(&dev->dev);
 		}
-		i--;
-	}
 
 	mutex_unlock(&kvm->slots_lock);
 
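For context, here is a minimal userspace sketch (not part of the patch) of the other end of this interface: registering a zone with KVM_REGISTER_COALESCED_MMIO and draining the ring that coalesced_mmio_write() fills. The ioctls and structures are the ones declared in <linux/kvm.h>; the kvm_fd/vm_fd/vcpu_fd descriptors and the handle_mmio() callback are placeholders for whatever the VMM already has, and the 4 KiB PAGE_SIZE fallback is an assumption of the sketch.

/* Hedged userspace sketch; assumes <linux/kvm.h> from a kernel with coalesced MMIO. */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096	/* assumption: 4 KiB pages; KVM_COALESCED_MMIO_MAX expands in terms of it */
#endif

static struct kvm_coalesced_mmio_ring *ring;

/* Map the vcpu run area and locate the coalesced ring inside it. */
static int setup_ring(int kvm_fd, int vcpu_fd)
{
	long run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	int pg_off = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
	char *run;

	if (run_size < 0 || pg_off <= 0)
		return -1;	/* coalesced MMIO not supported */

	run = mmap(NULL, run_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	/* The ring shares the vcpu mmap area with struct kvm_run. */
	ring = (struct kvm_coalesced_mmio_ring *)(run + pg_off * PAGE_SIZE);
	return 0;
}

/* Ask KVM to batch guest writes to [addr, addr + size); after this patch
 * each such zone becomes its own device on the KVM_MMIO_BUS. */
static int register_zone(int vm_fd, uint64_t addr, uint32_t size)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = addr,
		.size = size,
	};

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/* Consumer side of the ring filled by coalesced_mmio_write(); typically
 * called after every KVM_RUN.  handle_mmio() stands in for the VMM's own
 * MMIO dispatch. */
static void drain_ring(void (*handle_mmio)(uint64_t addr, const void *data, uint32_t len))
{
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *e = &ring->coalesced_mmio[ring->first];

		handle_mmio(e->phys_addr, e->data, e->len);
		__sync_synchronize();	/* pairs with the kernel's smp_wmb() */
		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
	}
}

The userspace ABI is unchanged by the patch; what changes is that each registered zone is now a separate kvm_io_device on a per-VM list, so unregistering one zone no longer has to compact a fixed zone array, at the cost of one bus entry per zone.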