Hi Juergen,
On 25/11/2020 10:51, Juergen Gross wrote:
-static struct evtchn_fifo_queue *lock_old_queue(const struct domain *d,
-                                                struct evtchn *evtchn,
-                                                unsigned long *flags)
-{
-    struct vcpu *v;
-    struct evtchn_fifo_queue *q, *old_q;
-    unsigned int try;
-    union evtchn_fifo_lastq lastq;
-
-    for ( try = 0; try < 3; try++ )
-    {
-        lastq.raw = read_atomic(&evtchn->fifo_lastq);
-        v = d->vcpu[lastq.last_vcpu_id];
-        old_q = &v->evtchn_fifo->queue[lastq.last_priority];
-
-        spin_lock_irqsave(&old_q->lock, *flags);
-
-        v = d->vcpu[lastq.last_vcpu_id];
-        q = &v->evtchn_fifo->queue[lastq.last_priority];
-
-        if ( old_q == q )
-            return old_q;
-
-        spin_unlock_irqrestore(&old_q->lock, *flags);
-    }
-
-    gprintk(XENLOG_WARNING,
-            "dom%d port %d lost event (too many queue changes)\n",
-            d->domain_id, evtchn->port);
-    return NULL;
-}
-
 static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link)
 {
     event_word_t new, old;
@@ -190,6 +158,9 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn)
     event_word_t *word;
     unsigned long flags;
     bool_t was_pending;
+    struct evtchn_fifo_queue *q, *old_q;
+    unsigned int try;
+    bool linked = true;
 
     port = evtchn->port;
     word = evtchn_fifo_word_from_port(d, port);
@@ -204,17 +175,67 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn)
         return;
     }
 
+    /*
+     * Lock all queues related to the event channel (in case of a queue change
+     * this might be two).
+     * It is mandatory to do that before setting and testing the PENDING bit
+     * and to hold the current queue lock until the event has been put into
+     * the list of pending events in order to avoid waking up a guest without
+     * the event being visibly pending in the guest.
+     */
+    for ( try = 0; try < 4; try++ )
May I ask why the number of tries is 4 rather than the original 3?
+    {
+        union evtchn_fifo_lastq lastq;
+        const struct vcpu *old_v;
+
+        lastq.raw = read_atomic(&evtchn->fifo_lastq);
+        old_v = d->vcpu[lastq.last_vcpu_id];
+
+        q = &v->evtchn_fifo->queue[evtchn->priority];
+        old_q = &old_v->evtchn_fifo->queue[lastq.last_priority];
+
+        if ( q == old_q )
+            spin_lock_irqsave(&q->lock, flags);
+        else if ( q < old_q )
+        {
+            spin_lock_irqsave(&q->lock, flags);
+            spin_lock(&old_q->lock);
+        }
+        else
+        {
+            spin_lock_irqsave(&old_q->lock, flags);
+            spin_lock(&q->lock);
+        }
+
+        lastq.raw = read_atomic(&evtchn->fifo_lastq);
+        old_v = d->vcpu[lastq.last_vcpu_id];
+        if ( q == &v->evtchn_fifo->queue[evtchn->priority] &&
+             old_q == &old_v->evtchn_fifo->queue[lastq.last_priority] )
+            break;
+
+        if ( q != old_q )
+            spin_unlock(&old_q->lock);
+        spin_unlock_irqrestore(&q->lock, flags);
+    }
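FWIW, for anyone else following along: the q < old_q comparison above gives the two queue locks a fixed, address-based ordering, which is what avoids an ABBA deadlock when two paths race on a queue change. A minimal sketch of the pattern, with a purely illustrative lock_two() helper (not a suggestion to change the patch):

    /* Illustrative only: take two spinlocks in address order. */
    static void lock_two(spinlock_t *a, spinlock_t *b, unsigned long *flags)
    {
        if ( a == b )
            spin_lock_irqsave(a, *flags);   /* only one lock involved */
        else if ( a < b )
        {
            spin_lock_irqsave(a, *flags);   /* lower address first ... */
            spin_lock(b);                   /* ... then the higher one */
        }
        else
        {
            spin_lock_irqsave(b, *flags);
            spin_lock(a);
        }
    }

The unlock side can then release the locks in any order, as the patch already does.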
+
     was_pending = guest_test_and_set_bit(d, EVTCHN_FIFO_PENDING, word);
 
+    /* If we didn't get the lock, bail out. */
+    if ( try == 4 )
+    {
+        gprintk(XENLOG_WARNING,
+                "dom%d port %d lost event (too many queue changes)\n",
+                d->domain_id, evtchn->port);
NIT: You can use %pd in place of dom%d.
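i.e. something along the lines of (untested):

    gprintk(XENLOG_WARNING,
            "%pd port %d lost event (too many queue changes)\n",
            d, evtchn->port);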
Cheers,
--
Julien Grall