debuggers.hg
changeset 4649:ebeac8efe955
bitkeeper revision 1.1350 (42676ee4BkgqwvPiIyB44k55uY8cSA)
Fix blkdev suspend/resume.
Signed-off-by: Keir Fraser <keir@xensource.com>
author    kaf24@firebug.cl.cam.ac.uk
date      Thu Apr 21 09:14:12 2005 +0000 (2005-04-21)
parents   a1c65fd28aec
children  319e2634476d
files     linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c
          linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
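
The patch replaces the old rec_ring recovery ring with a blk_shadow array: each slot keeps a pickled copy of an in-flight request, a back-pointer to the kernel request, and the machine frames it granted, all chained through a simple free list. As a reading aid for the diff below, here is a minimal standalone sketch of that free-list pattern; the ring size and type names are assumptions for the sketch, and the real driver slot carries a full blkif_request_t plus per-segment frame numbers.

#include <assert.h>
#include <string.h>

#define SHADOW_RING_SIZE 64            /* assumed ring size for this sketch only */

struct shadow_slot {
    unsigned long id;                  /* next-free index while free, request id while in use */
    unsigned long request;             /* back-pointer to the in-flight request; 0 = slot unused */
};

static struct shadow_slot shadow[SHADOW_RING_SIZE];
static unsigned long shadow_free;

/* Chain every slot onto the free list, as xlblk_init() does in the patch. */
static void shadow_init(void)
{
    int i;
    memset(shadow, 0, sizeof(shadow));
    for (i = 0; i < SHADOW_RING_SIZE; i++)
        shadow[i].id = i + 1;
    shadow[SHADOW_RING_SIZE - 1].id = 0x0fffffff;   /* end-of-list sentinel */
    shadow_free = 0;
}

/* Pop a free slot; the caller records its request pointer and pickled state there. */
static unsigned long get_id_from_freelist(void)
{
    unsigned long free = shadow_free;
    assert(free <= SHADOW_RING_SIZE);
    shadow_free = shadow[free].id;
    return free;
}

/* Return a completed slot to the free list; clearing request lets recovery skip it. */
static void add_id_to_freelist(unsigned long id)
{
    shadow[id].id = shadow_free;
    shadow[id].request = 0;
    shadow_free = id;
}

Recovery then only has to walk the slots whose request pointer is non-zero, which is why ADD_ID_TO_FREELIST in the patch clears it.
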
line diff
--- a/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c	Thu Apr 21 00:16:35 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c	Thu Apr 21 09:14:12 2005 +0000
@@ -330,34 +330,36 @@ int gnttab_resume(void)
     setup.nr_frames  = NR_GRANT_FRAMES;
     setup.frame_list = frames;

-    if ( HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0 )
-        BUG();
-    if ( setup.status != 0 )
-        BUG();
+    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0);
+    BUG_ON(setup.status != 0);

     for ( i = 0; i < NR_GRANT_FRAMES; i++ )
         set_fixmap_ma(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);

+    return 0;
+}
+
+int gnttab_suspend(void)
+{
+    int i;
+
+    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
+        clear_fixmap(FIX_GNTTAB_END - i);
+
+    return 0;
+}
+
+static int __init gnttab_init(void)
+{
+    int i;
+
+    BUG_ON(gnttab_resume());
+
     shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);

     for ( i = 0; i < NR_GRANT_ENTRIES; i++ )
         gnttab_free_list[i] = i + 1;

-    return 0;
-}
-
-int gnttab_suspend(void)
-{
-    int i;
-    for ( i = 0; i < NR_GRANT_FRAMES; i++ )
-        clear_fixmap(FIX_GNTTAB_END - i);
-    return 0;
-}
-
-static int __init gnttab_init(void)
-{
-    BUG_ON(gnttab_resume());
-
     /*
      * /proc/xen/grant : used by libxc to access grant tables
      */
--- a/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Apr 21 00:16:35 2005 +0000
+++ b/linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c	Thu Apr 21 09:14:12 2005 +0000
@@ -94,37 +94,38 @@ static domid_t rdomid = 0;
 static grant_ref_t gref_head, gref_terminal;
 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
     (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
+#define GRANTREF_INVALID (1<<15)
 #endif

-unsigned long rec_ring_free;
-blkif_request_t rec_ring[BLK_RING_SIZE];
+static struct blk_shadow {
+    blkif_request_t req;
+    unsigned long request;
+    unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+} blk_shadow[BLK_RING_SIZE];
+unsigned long blk_shadow_free;

-static int recovery = 0;           /* "Recovery in progress" flag.  Protected
-                                    * by the blkif_io_lock */
+static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */

 static void kick_pending_request_queues(void);

 int __init xlblk_init(void);

-void blkif_completion( blkif_request_t *req );
-
-static inline int GET_ID_FROM_FREELIST( void )
-{
-    unsigned long free = rec_ring_free;
+static void blkif_completion(struct blk_shadow *s);

+static inline int GET_ID_FROM_FREELIST(void)
+{
+    unsigned long free = blk_shadow_free;
     BUG_ON(free > BLK_RING_SIZE);
-
-    rec_ring_free = rec_ring[free].id;
-
-    rec_ring[free].id = 0x0fffffee; /* debug */
-
+    blk_shadow_free = blk_shadow[free].req.id;
+    blk_shadow[free].req.id = 0x0fffffee; /* debug */
     return free;
 }

-static inline void ADD_ID_TO_FREELIST( unsigned long id )
+static inline void ADD_ID_TO_FREELIST(unsigned long id)
 {
-    rec_ring[id].id = rec_ring_free;
-    rec_ring_free = id;
+    blk_shadow[id].req.id = blk_shadow_free;
+    blk_shadow[id].request = 0;
+    blk_shadow_free = id;
 }


@@ -138,41 +139,31 @@ static int sg_operation = -1;
 #define DISABLE_SCATTERGATHER() (sg_operation = -1)
 #endif

-static inline void translate_req_to_pfn(blkif_request_t *xreq,
-                                        blkif_request_t *req)
+static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
 {
+#ifndef CONFIG_XEN_BLKDEV_GRANT
     int i;
+#endif

-    xreq->operation     = req->operation;
-    xreq->nr_segments   = req->nr_segments;
-    xreq->device        = req->device;
-    /* preserve id */
-    xreq->sector_number = req->sector_number;
+    s->req = *r;

-    for ( i = 0; i < req->nr_segments; i++ )
-#ifdef CONFIG_XEN_BLKDEV_GRANT
-        xreq->frame_and_sects[i] = req->frame_and_sects[i];
-#else
-        xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+    for ( i = 0; i < r->nr_segments; i++ )
+        s->req.frame_and_sects[i] = machine_to_phys(r->frame_and_sects[i]);
 #endif
 }

-static inline void translate_req_to_mfn(blkif_request_t *xreq,
-                                        blkif_request_t *req)
+static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
 {
+#ifndef CONFIG_XEN_BLKDEV_GRANT
     int i;
+#endif

-    xreq->operation     = req->operation;
-    xreq->nr_segments   = req->nr_segments;
-    xreq->device        = req->device;
-    xreq->id            = req->id;   /* copy id (unlike above) */
-    xreq->sector_number = req->sector_number;
+    *r = s->req;

-    for ( i = 0; i < req->nr_segments; i++ )
-#ifdef CONFIG_XEN_BLKDEV_GRANT
-        xreq->frame_and_sects[i] = req->frame_and_sects[i];
-#else
-        xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+#ifndef CONFIG_XEN_BLKDEV_GRANT
+    for ( i = 0; i < s->req.nr_segments; i++ )
+        r->frame_and_sects[i] = phys_to_machine(s->req.frame_and_sects[i]);
 #endif
 }

@@ -185,8 +176,6 @@ static inline void flush_requests(void)
 }


-
-
 /************************** KERNEL VERSION 2.6 **************************/

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
@@ -208,7 +197,6 @@ static void vbd_update(void)

 static void kick_pending_request_queues(void)
 {
-
     if ( (xlbd_blk_queue != NULL) &&
          test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
     {
@@ -218,7 +206,6 @@ static void kick_pending_request_queues(
          */
         xlbd_blk_queue->request_fn(xlbd_blk_queue);
     }
-
 }


@@ -243,9 +230,8 @@ int blkif_release(struct inode *inode, s
     * When usage drops to zero it may allow more VBD updates to occur.
     * Update of usage count is protected by a per-device semaphore.
     */
-    if (--di->mi->usage == 0) {
+    if ( --di->mi->usage == 0 )
        vbd_update();
-    }

     return 0;
 }
@@ -259,8 +245,8 @@ int blkif_ioctl(struct inode *inode, str
     DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
                   command, (long)argument, inode->i_rdev);

-    switch (command) {
-
+    switch ( command )
+    {
     case HDIO_GETGEO:
         /* return ENOSYS to use defaults */
         return -ENOSYS;
@@ -312,7 +298,7 @@ static int blkif_queue_request(struct re
     /* Fill out a communications ring structure. */
     ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
     id = GET_ID_FROM_FREELIST();
-    rec_ring[id].id = (unsigned long) req;
+    blk_shadow[id].request = (unsigned long)req;

     ring_req->id = id;
     ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
@@ -341,8 +327,12 @@ static int blkif_queue_request(struct re
                                buffer_ma >> PAGE_SHIFT,
                                rq_data_dir(req) );

+            blk_shadow[id].frame[ring_req->nr_segments] =
+                buffer_ma >> PAGE_SHIFT;
+
             ring_req->frame_and_sects[ring_req->nr_segments++] =
                 (((u32) ref) << 16) | (fsect << 3) | lsect;
+
 #else
             ring_req->frame_and_sects[ring_req->nr_segments++] =
                 buffer_ma | (fsect << 3) | lsect;
@@ -353,7 +343,7 @@ static int blkif_queue_request(struct re
     blk_ring.req_prod_pvt++;

     /* Keep a private copy so we can reissue requests when recovering. */
-    translate_req_to_pfn(&rec_ring[id], ring_req);
+    pickle_request(&blk_shadow[id], ring_req);

     return 0;
 }
@@ -372,8 +362,10 @@ void do_blkif_request(request_queue_t *r

     queued = 0;

-    while ((req = elv_next_request(rq)) != NULL) {
-        if (!blk_fs_request(req)) {
+    while ( (req = elv_next_request(rq)) != NULL )
+    {
+        if ( !blk_fs_request(req) )
+        {
             end_request(req, 0);
             continue;
         }
@@ -383,19 +375,23 @@ void do_blkif_request(request_queue_t *r
             blk_stop_queue(rq);
             break;
         }
+
         DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
                 req, req->cmd, req->sector, req->current_nr_sectors,
                 req->nr_sectors, req->buffer,
                 rq_data_dir(req) ? "write" : "read");
+
         blkdev_dequeue_request(req);
-        if (blkif_queue_request(req)) {
+        if ( blkif_queue_request(req) )
+        {
             blk_stop_queue(rq);
             break;
         }
+
         queued++;
     }

-    if (queued != 0)
+    if ( queued != 0 )
         flush_requests();
 }

@@ -424,11 +420,12 @@ static irqreturn_t blkif_int(int irq, vo
         unsigned long id;

         bret = RING_GET_RESPONSE(&blk_ring, i);
-        id = bret->id;
-        req = (struct request *)rec_ring[id].id;
-        blkif_completion( &rec_ring[id] );
+        id   = bret->id;
+        req  = (struct request *)blk_shadow[id].request;

-        ADD_ID_TO_FREELIST(id); /* overwrites req */
+        blkif_completion(&blk_shadow[id]);
+
+        ADD_ID_TO_FREELIST(id);

         switch ( bret->operation )
         {
@@ -437,7 +434,7 @@ static irqreturn_t blkif_int(int irq, vo
             if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
                 DPRINTK("Bad return from blkdev data request: %x\n",
                         bret->status);
-
+
             if ( unlikely(end_that_request_first
                           (req,
                            (bret->status == BLKIF_RSP_OKAY),
@@ -813,10 +810,9 @@ static int blkif_queue_request(unsigned
                                blk_ring.req_prod_pvt - 1);
         bh = (struct buffer_head *)id;

-        bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
-
-        rec_ring[req->id].id = id;
-
+        bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request;
+        blk_shadow[req->id].request = (unsigned long)id;
+
 #ifdef CONFIG_XEN_BLKDEV_GRANT
         /* install a grant reference. */
         ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
@@ -828,6 +824,9 @@ static int blkif_queue_request(unsigned
                         buffer_ma >> PAGE_SHIFT,
                         ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );

+        blk_shadow[id].frame[req->nr_segments] =
+            buffer_ma >> PAGE_SHIFT;
+
         req->frame_and_sects[req->nr_segments] =
             (((u32) ref ) << 16) | (fsect << 3) | lsect;
 #else
@@ -840,7 +839,7 @@ static int blkif_queue_request(unsigned
             DISABLE_SCATTERGATHER();

         /* Update the copy of the request in the recovery ring. */
-        translate_req_to_pfn(&rec_ring[req->id], req );
+        pickle_request(&blk_shadow[req->id], req );

         return 0;
     }
@@ -864,7 +863,7 @@ static int blkif_queue_request(unsigned
     req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);

     xid = GET_ID_FROM_FREELIST();
-    rec_ring[xid].id = id;
+    blk_shadow[xid].request = (unsigned long)id;

     req->id = xid;
     req->operation = operation;
@@ -882,13 +881,15 @@ static int blkif_queue_request(unsigned
                     buffer_ma >> PAGE_SHIFT,
                     ( operation == BLKIF_OP_WRITE ? 1 : 0 ) );

+    blk_shadow[xid].frame[0] = buffer_ma >> PAGE_SHIFT;
+
     req->frame_and_sects[0] = (((u32) ref)<<16) | (fsect<<3) | lsect;
 #else
     req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
 #endif

     /* Keep a private copy so we can reissue requests when recovering. */
-    translate_req_to_pfn(&rec_ring[xid], req );
+    pickle_request(&blk_shadow[xid], req);

     blk_ring.req_prod_pvt++;

@@ -1002,9 +1003,9 @@ static void blkif_int(int irq, void *dev

         bret = RING_GET_RESPONSE(&blk_ring, i);
         id = bret->id;
-        bh = (struct buffer_head *)rec_ring[id].id;
+        bh = (struct buffer_head *)blk_shadow[id].request;

-        blkif_completion( &rec_ring[id] );
+        blkif_completion(&blk_shadow[id]);

         ADD_ID_TO_FREELIST(id);

@@ -1083,9 +1084,9 @@ void blkif_control_send(blkif_request_t

     id = GET_ID_FROM_FREELIST();
     req_d->id = id;
-    rec_ring[id].id = (unsigned long) req;
+    blk_shadow[id].request = (unsigned long)req;

-    translate_req_to_pfn( &rec_ring[id], req );
+    pickle_request(&blk_shadow[id], req);

     blk_ring.req_prod_pvt++;
     flush_requests();
@@ -1184,50 +1185,69 @@ static void blkif_recover(void)
 {
     int i;
     blkif_request_t *req;
+    struct blk_shadow *copy;
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+    int j;
+#endif

-    /* Hmm, requests might be re-ordered when we re-issue them.
-     * This will need to be fixed once we have barriers */
+    /* Stage 1: Make a safe copy of the shadow state. */
+    copy = (struct blk_shadow *)kmalloc(sizeof(blk_shadow), GFP_KERNEL);
+    BUG_ON(copy == NULL);
+    memcpy(copy, blk_shadow, sizeof(blk_shadow));

-    /* Stage 1 : Find active and move to safety. */
+    /* Stage 2: Set up free list. */
+    memset(&blk_shadow, 0, sizeof(blk_shadow));
+    for ( i = 0; i < BLK_RING_SIZE; i++ )
+        blk_shadow[i].req.id = i+1;
+    blk_shadow_free = blk_ring.req_prod_pvt;
+    blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+
+    /* Stage 3: Find pending requests and requeue them. */
     for ( i = 0; i < BLK_RING_SIZE; i++ )
     {
-        if ( rec_ring[i].id >= PAGE_OFFSET )
+        /* Not in use? */
+        if ( copy[i].request == 0 )
+            continue;
+
+        /* Grab a request slot and unpickle shadow state into it. */
+        req = RING_GET_REQUEST(
+            &blk_ring, blk_ring.req_prod_pvt);
+        unpickle_request(req, &copy[i]);
+
+        /* We get a new request id, and must reset the shadow state. */
+        req->id = GET_ID_FROM_FREELIST();
+        memcpy(&blk_shadow[req->id], &copy[i], sizeof(copy[i]));
+
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+        /* Rewrite any grant references invalidated by suspend/resume. */
+        for ( j = 0; j < req->nr_segments; j++ )
         {
-            req = RING_GET_REQUEST(&blk_ring,
-                                   blk_ring.req_prod_pvt);
-            translate_req_to_mfn(req, &rec_ring[i]);
-            blk_ring.req_prod_pvt++;
+            if ( req->frame_and_sects[j] & GRANTREF_INVALID )
+                gnttab_grant_foreign_access_ref(
+                    blkif_gref_from_fas(req->frame_and_sects[j]),
+                    rdomid,
+                    blk_shadow[req->id].frame[j],
+                    rq_data_dir((struct request *)
+                                blk_shadow[req->id].request));
+            req->frame_and_sects[j] &= ~GRANTREF_INVALID;
         }
+        blk_shadow[req->id].req = *req;
+#endif
+
+        blk_ring.req_prod_pvt++;
     }

-    /* Stage 2 : Set up shadow list. */
-    for ( i = 0; i < blk_ring.req_prod_pvt; i++ )
-    {
-        req = RING_GET_REQUEST(&blk_ring, i);
-        rec_ring[i].id = req->id;
-        req->id = i;
-        translate_req_to_pfn(&rec_ring[i], req);
-    }
+    kfree(copy);

-    /* Stage 3 : Set up free list. */
-    for ( ; i < BLK_RING_SIZE; i++ )
-        rec_ring[i].id = i+1;
-    rec_ring_free = blk_ring.req_prod_pvt;
-    rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
+    recovery = 0;

     /* blk_ring->req_prod will be set when we flush_requests().*/
     wmb();

-    /* Switch off recovery mode, using a memory barrier to ensure that
-     * it's seen before we flush requests - we don't want to miss any
-     * interrupts. */
-    recovery = 0;
-    wmb();
-
     /* Kicks things back into life. */
     flush_requests();

-    /* Now safe to left other peope use interface. */
+    /* Now safe to left other people use the interface. */
     blkif_state = BLKIF_STATE_CONNECTED;
 }

@@ -1409,10 +1429,11 @@ int __init xlblk_init(void)

     printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");

-    rec_ring_free = 0;
+    blk_shadow_free = 0;
+    memset(blk_shadow, 0, sizeof(blk_shadow));
     for ( i = 0; i < BLK_RING_SIZE; i++ )
-        rec_ring[i].id = i+1;
-    rec_ring[BLK_RING_SIZE-1].id = 0x0fffffff;
+        blk_shadow[i].req.id = i+1;
+    blk_shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

     (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
                                     CALLBACK_IN_BLOCKING_CONTEXT);
@@ -1428,32 +1449,32 @@ void blkdev_suspend(void)

 void blkdev_resume(void)
 {
+#ifdef CONFIG_XEN_BLKDEV_GRANT
+    int i, j;
+    for ( i = 0; i < BLK_RING_SIZE; i++ )
+        for ( j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++ )
+            blk_shadow[i].req.frame_and_sects[j] |= GRANTREF_INVALID;
+#endif
     send_driver_status(1);
 }

-void blkif_completion(blkif_request_t *req)
+static void blkif_completion(struct blk_shadow *s)
 {
     int i;
 #ifdef CONFIG_XEN_BLKDEV_GRANT
-    grant_ref_t gref;
-
-    for ( i = 0; i < req->nr_segments; i++ )
-    {
-        gref = blkif_gref_from_fas(req->frame_and_sects[i]);
-        gnttab_release_grant_reference(&gref_head, gref);
-    }
+    for ( i = 0; i < s->req.nr_segments; i++ )
+        gnttab_release_grant_reference(
+            &gref_head, blkif_gref_from_fas(s->req.frame_and_sects[i]));
 #else
     /* This is a hack to get the dirty logging bits set */
-    switch ( req->operation )
+    if ( s->req.operation == BLKIF_OP_READ )
     {
-    case BLKIF_OP_READ:
-        for ( i = 0; i < req->nr_segments; i++ )
+        for ( i = 0; i < s->req.nr_segments; i++ )
         {
-            unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+            unsigned long pfn = s->req.frame_and_sects[i] >> PAGE_SHIFT;
             unsigned long mfn = phys_to_machine_mapping[pfn];
             xen_machphys_update(mfn, pfn);
         }
-        break;
     }
 #endif
 }
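
On resume, the grant references pickled in the shadow slots are no longer valid. The patch handles this by having blkdev_resume() tag every pickled segment with GRANTREF_INVALID and having blkif_recover() re-grant foreign access for each tagged segment before reissuing the request. Below is a standalone sketch of that tag-and-regrant handshake; the sizes and type names are assumptions, and regrant() is a placeholder standing in for gnttab_grant_foreign_access_ref().

#define RING_SIZE        64            /* assumed values for this sketch only */
#define MAX_SEGS         11
#define GRANTREF_INVALID (1 << 15)

struct shadow_req {
    int           nr_segments;
    unsigned long frame_and_sects[MAX_SEGS];   /* grant reference kept in bits 31..16 */
    unsigned long frame[MAX_SEGS];             /* machine frame per segment */
};

static struct shadow_req shadow_state[RING_SIZE];

/* Placeholder for gnttab_grant_foreign_access_ref(); an assumption for the sketch. */
static void regrant(unsigned long gref, unsigned long frame)
{
    (void)gref;
    (void)frame;
}

/* blkdev_resume(): mark every pickled segment so recovery knows to re-grant it. */
static void mark_all_grants_invalid(void)
{
    int i, j;
    for (i = 0; i < RING_SIZE; i++)
        for (j = 0; j < MAX_SEGS; j++)
            shadow_state[i].frame_and_sects[j] |= GRANTREF_INVALID;
}

/* blkif_recover(), per pending request: re-grant tagged segments and clear the tag. */
static void regrant_invalid_segments(struct shadow_req *s)
{
    int j;
    for (j = 0; j < s->nr_segments; j++) {
        if (s->frame_and_sects[j] & GRANTREF_INVALID)
            regrant(s->frame_and_sects[j] >> 16, s->frame[j]);
        s->frame_and_sects[j] &= ~GRANTREF_INVALID;
    }
}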