xen-vtx-unstable

view linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c @ 6774:4d899a738d59

description: merge?
author: cl349@firebug.cl.cam.ac.uk
date: Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents: 9ead08216805 cdfa7dd00c44
children: e7c7196fa329 8ca0f98ba8e2
line source
/******************************************************************************
 * drivers/xen/tpmback/tpmback.c
 *
 * Copyright (c) 2005, IBM Corporation
 *
 * Author: Stefan Berger, stefanb@us.ibm.com
 * Grant table support: Mahadevan Gomathisankaran
 *
 * This code has been derived from drivers/xen/netback/netback.c
 * Copyright (c) 2002-2004, K A Fraser
 *
 */

#include "common.h"
#include <asm-xen/evtchn.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/grant_table.h>

struct data_exchange {
    struct list_head pending_pak;   /* packets waiting to be read */
    struct list_head current_pak;   /* packets handed to the reader, awaiting a response */
    unsigned int copied_so_far;
    u8 has_opener;
    rwlock_t pak_lock;              /* protects all of the previous fields */
    wait_queue_head_t wait_queue;
};

struct packet {
    struct list_head next;
    unsigned int data_len;
    u8 *data_buffer;
    tpmif_t *tpmif;
    u32 tpm_instance;
    u8 req_tag;
    u32 last_read;
    u8 flags;
    struct timer_list processing_timer;
};
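
/*
 * Flag semantics, derived from their use below: PACKET_FLAG_DISCARD_RESPONSE
 * makes packet_write() acknowledge a packet without sending a response to
 * the front-end; PACKET_FLAG_SEND_CONTROLMESSAGE marks a vTPM control
 * message as built by tpmif_vtpm_open()/tpmif_vtpm_close().
 */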
enum {
    PACKET_FLAG_DISCARD_RESPONSE = 1,
    PACKET_FLAG_SEND_CONTROLMESSAGE = 2,
};

static struct data_exchange dataex;

/* local function prototypes */
static int vtpm_queue_packet(struct packet *pak);
static int _packet_write(struct packet *pak,
                         const char *data, size_t size,
                         int userbuffer);
static void processing_timeout(unsigned long ptr);
static int packet_read_shmem(struct packet *pak,
                             tpmif_t *tpmif,
                             u32 offset,
                             char *buffer,
                             int isuserbuffer,
                             u32 left);

#define MAX_PENDING_REQS TPMIF_TX_RING_SIZE

static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

#define MIN(x,y) ((x) < (y) ? (x) : (y))

/***************************************************************
 Packet-related functions
***************************************************************/

static struct packet *
packet_find_instance(struct list_head *head, u32 tpm_instance)
{
    struct packet *pak;
    struct list_head *p;
    /*
     * traverse the list of packets and return the first
     * one with the given instance number
     */
    list_for_each(p, head) {
        pak = list_entry(p, struct packet, next);
        if (pak->tpm_instance == tpm_instance) {
            return pak;
        }
    }
    return NULL;
}

static struct packet *
packet_find_packet(struct list_head *head, void *packet)
{
    struct packet *pak;
    struct list_head *p;
    /*
     * traverse the list of packets and return the one
     * matching the given packet pointer
     */
    list_for_each(p, head) {
        pak = list_entry(p, struct packet, next);
        if (pak == packet) {
            return pak;
        }
    }
    return NULL;
}

static struct packet *
packet_alloc(tpmif_t *tpmif, u32 size, u8 req_tag, u8 flags)
{
    struct packet *pak = NULL;
    pak = kmalloc(sizeof(struct packet), GFP_KERNEL);
    if (NULL != pak) {
        memset(pak, 0x0, sizeof(*pak));
        if (tpmif) {
            pak->tpmif = tpmif;
            pak->tpm_instance = tpmif->tpm_instance;
        }
        pak->data_len = size;
        pak->req_tag = req_tag;
        pak->last_read = 0;
        pak->flags = flags;

        /*
         * cannot do tpmif_get(tpmif); bad things happen
         * on the last tpmif_put()
         */
        init_timer(&pak->processing_timer);
        pak->processing_timer.function = processing_timeout;
        pak->processing_timer.data = (unsigned long)pak;
    }
    return pak;
}

static inline void
packet_reset(struct packet *pak)
{
    pak->last_read = 0;
}

static inline void
packet_free(struct packet *pak)
{
    del_singleshot_timer_sync(&pak->processing_timer);
    if (pak->data_buffer) {
        kfree(pak->data_buffer);
    }
    /*
     * cannot do tpmif_put(pak->tpmif); bad things happen
     * on the last tpmif_put()
     */
    kfree(pak);
}

static int
packet_set(struct packet *pak,
           const unsigned char *buffer, u32 size)
{
    int rc = 0;
    unsigned char *buf = kmalloc(size, GFP_KERNEL);
    if (NULL != buf) {
        pak->data_buffer = buf;
        memcpy(buf, buffer, size);
        pak->data_len = size;
    } else {
        rc = -ENOMEM;
    }
    return rc;
}

/*
 * Write data to the shared memory and send it to the FE.
 */
static int
packet_write(struct packet *pak,
             const char *data, size_t size,
             int userbuffer)
{
    int rc = 0;

    DPRINTK("Supposed to send %d bytes to front-end!\n",
            size);

    if (0 != (pak->flags & PACKET_FLAG_SEND_CONTROLMESSAGE)) {
#ifdef CONFIG_XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
        u32 res;
        memcpy(&res, &data[2+4], sizeof(res));
        if (res != 0) {
            /*
             * Will close down this device and have the
             * FE notified about closure.
             */
        }
#endif
    }

    if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
        /* Don't send a response to this packet. Just acknowledge it. */
        rc = size;
    } else {
        rc = _packet_write(pak, data, size, userbuffer);
    }

    return rc;
}

static int
_packet_write(struct packet *pak,
              const char *data, size_t size,
              int userbuffer)
{
    /*
     * Write into the shared memory pages directly
     * and send it to the front end.
     */
    tpmif_t *tpmif = pak->tpmif;
    u16 handle;
    int rc = 0;
    unsigned int i = 0;
    unsigned int offset = 0;
    multicall_entry_t *mcl;

    if (tpmif == NULL)
        return -EFAULT;

    if (tpmif->status != CONNECTED) {
        return size;
    }

    mcl = tx_mcl;
    while (offset < size && i < TPMIF_TX_RING_SIZE) {
        unsigned int tocopy;
        struct gnttab_map_grant_ref map_op;
        struct gnttab_unmap_grant_ref unmap_op;
        tpmif_tx_request_t *tx;

        tx = &tpmif->tx->ring[i].req;

        if (0 == tx->addr) {
            DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
            return 0;
        }

        map_op.host_addr = MMAP_VADDR(tpmif, i);
        map_op.flags = GNTMAP_host_map;
        map_op.ref = tx->ref;
        map_op.dom = tpmif->domid;

        if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                               &map_op,
                                               1))) {
            BUG();
        }

        handle = map_op.handle;

        if (map_op.handle < 0) {
            DPRINTK(" Grant table operation failure !\n");
            return 0;
        }
        phys_to_machine_mapping[__pa(MMAP_VADDR(tpmif,i)) >> PAGE_SHIFT] =
            FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT);

        tocopy = size - offset;
        if (tocopy > PAGE_SIZE) {
            tocopy = PAGE_SIZE;
        }
        if (userbuffer) {
            if (copy_from_user((void *)(MMAP_VADDR(tpmif,i) |
                                        (tx->addr & ~PAGE_MASK)),
                               (void __user *)&data[offset],
                               tocopy)) {
                tpmif_put(tpmif);
                return -EFAULT;
            }
        } else {
            memcpy((void *)(MMAP_VADDR(tpmif,i) |
                            (tx->addr & ~PAGE_MASK)),
                   &data[offset], tocopy);
        }
        tx->size = tocopy;

        unmap_op.host_addr = MMAP_VADDR(tpmif, i);
        unmap_op.handle = handle;
        unmap_op.dev_bus_addr = 0;

        if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                               &unmap_op,
                                               1))) {
            BUG();
        }

        offset += tocopy;
        i++;
    }

    rc = offset;
    DPRINTK("Notifying frontend via event channel %d\n",
            tpmif->evtchn);
    notify_via_evtchn(tpmif->evtchn);

    return rc;
}

/*
 * Read data from the shared memory and copy it directly into the
 * provided buffer. Advance the last_read indicator, which tells
 * how many bytes have already been read.
 */
static int
packet_read(struct packet *pak, size_t numbytes,
            char *buffer, size_t buffersize,
            int userbuffer)
{
    tpmif_t *tpmif = pak->tpmif;
    /*
     * I am supposed to read 'numbytes' of data from the
     * buffer.
     * The first 4 bytes that are read are the instance number in
     * network byte order, after that comes the data from the
     * shared memory buffer.
     */
    u32 to_copy;
    u32 offset = 0;
    u32 room_left = buffersize;
    /*
     * Ensure that we see the request when we copy it.
     */
    mb();

    if (pak->last_read < 4) {
        /*
         * copy the instance number into the buffer
         */
        u32 instance_no = htonl(pak->tpm_instance);
        u32 last_read = pak->last_read;
        to_copy = MIN(4 - last_read, numbytes);

        if (userbuffer) {
            if (copy_to_user(&buffer[0],
                             &(((u8 *)&instance_no)[last_read]),
                             to_copy)) {
                return -EFAULT;
            }
        } else {
            memcpy(&buffer[0],
                   &(((u8 *)&instance_no)[last_read]),
                   to_copy);
        }

        pak->last_read += to_copy;
        offset += to_copy;
        room_left -= to_copy;
    }

    /*
     * If the packet has a data buffer appended, read from it...
     */
    if (room_left > 0) {
        if (pak->data_buffer) {
            u32 to_copy = MIN(pak->data_len - offset, room_left);
            u32 last_read = pak->last_read - 4;
            if (userbuffer) {
                if (copy_to_user(&buffer[offset],
                                 &pak->data_buffer[last_read],
                                 to_copy)) {
                    return -EFAULT;
                }
            } else {
                memcpy(&buffer[offset],
                       &pak->data_buffer[last_read],
                       to_copy);
            }
            pak->last_read += to_copy;
            offset += to_copy;
        } else {
            offset = packet_read_shmem(pak,
                                       tpmif,
                                       offset,
                                       buffer,
                                       userbuffer,
                                       room_left);
        }
    }
    return offset;
}

static int
packet_read_shmem(struct packet *pak,
                  tpmif_t *tpmif,
                  u32 offset,
                  char *buffer,
                  int isuserbuffer,
                  u32 room_left)
{
    u32 last_read = pak->last_read - 4;
    u32 i = (last_read / PAGE_SIZE);
    u32 pg_offset = last_read & (PAGE_SIZE - 1);
    u32 to_copy;
    u16 handle;

    tpmif_tx_request_t *tx;
    tx = &tpmif->tx->ring[0].req;
    /*
     * Start copying data at the page with index 'i'
     * and within that page at offset 'pg_offset'.
     * Copy a maximum of 'room_left' bytes.
     */
    to_copy = MIN(PAGE_SIZE - pg_offset, room_left);
    while (to_copy > 0) {
        void *src;
        struct gnttab_map_grant_ref map_op;
        struct gnttab_unmap_grant_ref unmap_op;

        tx = &tpmif->tx->ring[i].req;

        map_op.host_addr = MMAP_VADDR(tpmif, i);
        map_op.flags = GNTMAP_host_map;
        map_op.ref = tx->ref;
        map_op.dom = tpmif->domid;

        if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                               &map_op,
                                               1))) {
            BUG();
        }

        if (map_op.handle < 0) {
            DPRINTK(" Grant table operation failure !\n");
            return -EFAULT;
        }

        handle = map_op.handle;

        if (to_copy > tx->size) {
            /*
             * This is the case when the user wants to read more
             * than what we have. So we just give him what we
             * have.
             */
            to_copy = MIN(tx->size, to_copy);
        }

        DPRINTK("Copying from mapped memory at %08lx\n",
                (unsigned long)(MMAP_VADDR(tpmif,i) |
                                (tx->addr & ~PAGE_MASK)));

        src = (void *)(MMAP_VADDR(tpmif,i) |
                       ((tx->addr & ~PAGE_MASK) + pg_offset));
        if (isuserbuffer) {
            if (copy_to_user(&buffer[offset],
                             src,
                             to_copy)) {
                return -EFAULT;
            }
        } else {
            memcpy(&buffer[offset],
                   src,
                   to_copy);
        }

        DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
                tpmif->domid, buffer[offset], buffer[offset+1],
                buffer[offset+2], buffer[offset+3]);

        unmap_op.host_addr = MMAP_VADDR(tpmif, i);
        unmap_op.handle = handle;
        unmap_op.dev_bus_addr = 0;

        if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                               &unmap_op,
                                               1))) {
            BUG();
        }

        offset += to_copy;
        pg_offset = 0;
        last_read += to_copy;
        room_left -= to_copy;

        to_copy = MIN(PAGE_SIZE, room_left);
        i++;
    } /* while (to_copy > 0) */
    /*
     * Adjust the last_read pointer
     */
    pak->last_read = last_read + 4;
    return offset;
}

/* ============================================================
 * The file layer for reading data from this device
 * ============================================================
 */
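
/*
 * For illustration, a user-space vTPM manager would drive this device
 * roughly as sketched below (a minimal sketch only; 'buf' and
 * 'response_len' are hypothetical, and error handling is omitted):
 *
 *      int fd = open("/dev/vtpm", O_RDWR);
 *      unsigned char buf[4096];
 *      // read: 4-byte instance number (network byte order) + TPM request
 *      ssize_t n = read(fd, buf, sizeof(buf));
 *      // ...process the request for that vTPM instance...
 *      // write back: the same 4-byte instance number + TPM response
 *      write(fd, buf, 4 + response_len);
 */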
static int
vtpm_op_open(struct inode *inode, struct file *f)
{
    int rc = 0;
    unsigned long flags;

    write_lock_irqsave(&dataex.pak_lock, flags);
    if (dataex.has_opener == 0) {
        dataex.has_opener = 1;
    } else {
        rc = -EPERM;
    }
    write_unlock_irqrestore(&dataex.pak_lock, flags);
    return rc;
}

static ssize_t
vtpm_op_read(struct file *file,
             char __user * data, size_t size, loff_t * offset)
{
    int ret_size = -ENODATA;
    struct packet *pak = NULL;
    unsigned long flags;

    write_lock_irqsave(&dataex.pak_lock, flags);

    if (list_empty(&dataex.pending_pak)) {
        write_unlock_irqrestore(&dataex.pak_lock, flags);
        wait_event_interruptible(dataex.wait_queue,
                                 !list_empty(&dataex.pending_pak));
        write_lock_irqsave(&dataex.pak_lock, flags);
    }

    if (!list_empty(&dataex.pending_pak)) {
        unsigned int left;
        pak = list_entry(dataex.pending_pak.next, struct packet, next);

        left = pak->data_len - dataex.copied_so_far;

        DPRINTK("size given by app: %d, available: %d\n", size, left);

        ret_size = MIN(size, left);

        ret_size = packet_read(pak, ret_size, data, size, 1);
        if (ret_size < 0) {
            ret_size = -EFAULT;
        } else {
            DPRINTK("Copied %d bytes to user buffer\n", ret_size);

            dataex.copied_so_far += ret_size;
            if (dataex.copied_so_far >= pak->data_len + 4) {
                DPRINTK("All data from this packet given to app.\n");
                /* All data given to app */

                del_singleshot_timer_sync(&pak->processing_timer);
                list_del(&pak->next);
                list_add_tail(&pak->next, &dataex.current_pak);
                /*
                 * The more frontends that are handled at the same time,
                 * the more time we give the TPM to process the request.
                 */
                mod_timer(&pak->processing_timer,
                          jiffies + (num_frontends * 10 * HZ));
                dataex.copied_so_far = 0;
            }
        }
    }
    write_unlock_irqrestore(&dataex.pak_lock, flags);

    DPRINTK("Returning result from read to app: %d\n", ret_size);

    return ret_size;
}

/*
 * Write operation - only works after a previous read operation!
 */
static ssize_t
vtpm_op_write(struct file *file, const char __user * data, size_t size,
              loff_t * offset)
{
    struct packet *pak;
    int rc = 0;
    unsigned int off = 4;
    unsigned long flags;
    u32 instance_no = 0;
    u32 len_no = 0;

    /*
     * Minimum required packet size is:
     * 4 bytes for the instance number
     * 2 bytes for the tag
     * 4 bytes for the paramSize
     * 4 bytes for the ordinal
     * sum: 14 bytes
     */
    if (size < off + 10) {
        return -EFAULT;
    }
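
    /*
     * Hence a well-formed write looks like this (illustrative layout,
     * derived from the checks below; multi-byte fields in network
     * byte order):
     *
     *      bytes  0..3     vTPM instance number
     *      bytes  4..5     TPM tag
     *      bytes  6..9     paramSize, which must equal size - 4
     *      bytes 10..13    ordinal
     */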
    if (copy_from_user(&instance_no,
                       (void __user *)&data[0],
                       4)) {
        return -EFAULT;
    }

    if (copy_from_user(&len_no,
                       (void __user *)&data[off+2],
                       4) ||
        (off + ntohl(len_no) != size)) {
        return -EFAULT;
    }

    write_lock_irqsave(&dataex.pak_lock, flags);
    pak = packet_find_instance(&dataex.current_pak, ntohl(instance_no));

    if (pak == NULL) {
        write_unlock_irqrestore(&dataex.pak_lock, flags);
        printk(KERN_ALERT "No associated packet!\n");
        return -EFAULT;
    } else {
        del_singleshot_timer_sync(&pak->processing_timer);
        list_del(&pak->next);
    }

    write_unlock_irqrestore(&dataex.pak_lock, flags);

    /*
     * The first 'off' bytes of the user buffer hold the instance
     * number; skip them and send only the TPM response itself.
     */
    size -= off;
    data = &data[off];

    rc = packet_write(pak, data, size, 1);

    if (rc > 0) {
        /* account for the instance number bytes skipped above */
        rc += off;
    }
    packet_free(pak);
    return rc;
}

static int
vtpm_op_release(struct inode *inode, struct file *file)
{
    unsigned long flags;
    vtpm_release_packets(NULL, 1);
    write_lock_irqsave(&dataex.pak_lock, flags);
    dataex.has_opener = 0;
    write_unlock_irqrestore(&dataex.pak_lock, flags);
    return 0;
}

static unsigned int
vtpm_op_poll(struct file *file, struct poll_table_struct *pst)
{
    /* wait on the queue that vtpm_queue_packet() wakes */
    poll_wait(file, &dataex.wait_queue, pst);
    return list_empty(&dataex.pending_pak) ? 0 : (POLLIN | POLLRDNORM);
}

static struct file_operations vtpm_ops = {
    .owner = THIS_MODULE,
    .llseek = no_llseek,
    .open = vtpm_op_open,
    .read = vtpm_op_read,
    .write = vtpm_op_write,
    .release = vtpm_op_release,
    .poll = vtpm_op_poll,
};

static struct miscdevice ibmvtpms_miscdevice = {
    .minor = 225,
    .name = "vtpm",
    .fops = &vtpm_ops,
};

/***************************************************************
 Virtual TPM functions and data structures
***************************************************************/

static u8 create_cmd[] = {
    1,193,      /* 0: TPM_TAG_RQU_COMMAND */
    0,0,0,19,   /* 2: length */
    0,0,0,0x1,  /* 6: VTPM_ORD_OPEN */
    0,          /* 10: VTPM type */
    0,0,0,0,    /* 11: domain id */
    0,0,0,0     /* 15: instance id */
};

static u8 destroy_cmd[] = {
    1,193,      /* 0: TPM_TAG_RQU_COMMAND */
    0,0,0,14,   /* 2: length */
    0,0,0,0x2,  /* 6: VTPM_ORD_CLOSE */
    0,0,0,0     /* 10: instance id */
};
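
/*
 * The helpers below patch the offsets annotated above before sending:
 * create_cmd receives the domain id at offset 11 and the instance id at
 * offset 15; destroy_cmd receives the instance id at offset 10.
 */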
int tpmif_vtpm_open(tpmif_t *tpmif, domid_t domid, u32 instance)
{
    int rc = 0;
    struct packet *pak = packet_alloc(tpmif, sizeof(create_cmd), create_cmd[0],
                                      PACKET_FLAG_DISCARD_RESPONSE|
                                      PACKET_FLAG_SEND_CONTROLMESSAGE);
    if (pak) {
        u8 buf[sizeof(create_cmd)];
        u32 domid_no = htonl((u32)domid);
        u32 instance_no = htonl(instance);
        memcpy(buf, create_cmd, sizeof(create_cmd));

        memcpy(&buf[11], &domid_no, sizeof(u32));
        memcpy(&buf[15], &instance_no, sizeof(u32));

        /* copy the buffer into the packet */
        rc = packet_set(pak, buf, sizeof(buf));

        if (rc == 0) {
            pak->tpm_instance = 0;
            rc = vtpm_queue_packet(pak);
        }
        if (rc < 0) {
            /* could not be queued or built */
            packet_free(pak);
        }
    } else {
        rc = -ENOMEM;
    }
    return rc;
}

int tpmif_vtpm_close(u32 instid)
{
    int rc = 0;
    struct packet *pak;

    pak = packet_alloc(NULL,
                       sizeof(destroy_cmd),
                       destroy_cmd[0],
                       PACKET_FLAG_DISCARD_RESPONSE|
                       PACKET_FLAG_SEND_CONTROLMESSAGE);
    if (pak) {
        u8 buf[sizeof(destroy_cmd)];
        u32 instid_no = htonl(instid);
        memcpy(buf, destroy_cmd, sizeof(destroy_cmd));
        memcpy(&buf[10], &instid_no, sizeof(u32));

        /* copy the buffer into the packet */
        rc = packet_set(pak, buf, sizeof(buf));

        if (rc == 0) {
            pak->tpm_instance = 0;
            rc = vtpm_queue_packet(pak);
        }
        if (rc < 0) {
            /* could not be queued or built */
            packet_free(pak);
        }
    } else {
        rc = -ENOMEM;
    }
    return rc;
}

/***************************************************************
 Utility functions
***************************************************************/

static int
tpm_send_fail_message(struct packet *pak, u8 req_tag)
{
    int rc;
    static const unsigned char tpm_error_message_fail[] = {
        0x00, 0x00,
        0x00, 0x00, 0x00, 0x0a,
        0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
    };
    unsigned char buffer[sizeof(tpm_error_message_fail)];

    memcpy(buffer, tpm_error_message_fail, sizeof(tpm_error_message_fail));
    /*
     * Insert the right response tag depending on the given request tag.
     * All response tags are '+3' to the request tag, e.g.
     * TPM_TAG_RQU_COMMAND (0x00c1) becomes TPM_TAG_RSP_COMMAND (0x00c4).
     */
    buffer[1] = req_tag + 3;

    /*
     * Write the data to shared memory and notify the front-end
     */
    rc = packet_write(pak, buffer, sizeof(buffer), 0);

    return rc;
}

static void
_vtpm_release_packets(struct list_head *head, tpmif_t *tpmif,
                      int send_msgs)
{
    struct packet *pak;
    struct list_head *pos, *tmp;

    list_for_each_safe(pos, tmp, head) {
        pak = list_entry(pos, struct packet, next);
        if (tpmif == NULL || pak->tpmif == tpmif) {
            int can_send = 0;
            del_singleshot_timer_sync(&pak->processing_timer);
            list_del(&pak->next);

            if (pak->tpmif && pak->tpmif->status == CONNECTED) {
                can_send = 1;
            }

            if (send_msgs && can_send) {
                tpm_send_fail_message(pak, pak->req_tag);
            }
            packet_free(pak);
        }
    }
}

int
vtpm_release_packets(tpmif_t *tpmif, int send_msgs)
{
    unsigned long flags;

    write_lock_irqsave(&dataex.pak_lock, flags);

    _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
    _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);

    write_unlock_irqrestore(&dataex.pak_lock, flags);
    return 0;
}

static int vtpm_queue_packet(struct packet *pak)
{
    int rc = 0;
    if (dataex.has_opener) {
        unsigned long flags;
        write_lock_irqsave(&dataex.pak_lock, flags);
        list_add_tail(&pak->next, &dataex.pending_pak);
        /* give the TPM some time to pick up the request */
        mod_timer(&pak->processing_timer, jiffies + (10 * HZ));
        write_unlock_irqrestore(&dataex.pak_lock, flags);

        wake_up_interruptible(&dataex.wait_queue);
    } else {
        rc = -EFAULT;
    }
    return rc;
}

static int vtpm_receive(tpmif_t *tpmif, u32 size)
{
    int rc = 0;
    unsigned char buffer[10];
    __be32 *native_size;
    /*
     * The request tag is not known until the header has been read
     * below, so allocate with a tag of 0 and fill it in afterwards.
     */
    struct packet *pak = packet_alloc(tpmif, size, 0, 0);

    if (NULL == pak) {
        return -ENOMEM;
    }
    /*
     * Read 10 bytes from the received buffer to test its
     * content for validity.
     */
    if (sizeof(buffer) != packet_read(pak,
                                      sizeof(buffer), buffer,
                                      sizeof(buffer), 0)) {
        goto failexit;
    }
    /*
     * Reset the packet read pointer so we can read all its
     * contents again.
     */
    packet_reset(pak);

    /* bytes 0-3 hold the instance number, bytes 4-5 the request tag */
    pak->req_tag = buffer[4+1];

    native_size = (__force __be32 *)(&buffer[4+2]);
    /*
     * Verify that the size of the packet is correct
     * as indicated and that there's actually someone reading packets.
     * The minimum size of the packet is '10' for tag, size indicator
     * and ordinal.
     */
    if (size < 10 ||
        be32_to_cpu(*native_size) != size ||
        0 == dataex.has_opener) {
        rc = -EINVAL;
        goto failexit;
    } else {
        if ((rc = vtpm_queue_packet(pak)) < 0) {
            goto failexit;
        }
    }
    return 0;

failexit:
    if (pak) {
        tpm_send_fail_message(pak, buffer[4+1]);
        packet_free(pak);
    }
    return rc;
}

/*
 * Timeout function that gets invoked when a packet has not been processed
 * during the timeout period.
 * The packet must be on a list when this function is invoked. This
 * also means that once it's taken off a list, the timer must be
 * destroyed as well.
 */
static void processing_timeout(unsigned long ptr)
{
    struct packet *pak = (struct packet *)ptr;
    unsigned long flags;
    write_lock_irqsave(&dataex.pak_lock, flags);
    /*
     * Check whether the packet is still on one of the lists.
     */
    if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
        pak == packet_find_packet(&dataex.current_pak, pak)) {
        list_del(&pak->next);
        tpm_send_fail_message(pak, pak->req_tag);
        packet_free(pak);
    }

    write_unlock_irqrestore(&dataex.pak_lock, flags);
}

static void tpm_tx_action(unsigned long unused);
static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);

static struct list_head tpm_schedule_list;
static spinlock_t tpm_schedule_list_lock;

static inline void
maybe_schedule_tx_action(void)
{
    smp_mb();
    tasklet_schedule(&tpm_tx_tasklet);
}

static inline int
__on_tpm_schedule_list(tpmif_t * tpmif)
{
    return tpmif->list.next != NULL;
}

static void
remove_from_tpm_schedule_list(tpmif_t * tpmif)
{
    spin_lock_irq(&tpm_schedule_list_lock);
    if (likely(__on_tpm_schedule_list(tpmif))) {
        list_del(&tpmif->list);
        tpmif->list.next = NULL;
        tpmif_put(tpmif);
    }
    spin_unlock_irq(&tpm_schedule_list_lock);
}

static void
add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
{
    if (__on_tpm_schedule_list(tpmif))
        return;

    spin_lock_irq(&tpm_schedule_list_lock);
    if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
        list_add_tail(&tpmif->list, &tpm_schedule_list);
        tpmif_get(tpmif);
    }
    spin_unlock_irq(&tpm_schedule_list_lock);
}

void
tpmif_schedule_work(tpmif_t * tpmif)
{
    add_to_tpm_schedule_list_tail(tpmif);
    maybe_schedule_tx_action();
}

void
tpmif_deschedule_work(tpmif_t * tpmif)
{
    remove_from_tpm_schedule_list(tpmif);
}

static void
tpm_tx_action(unsigned long unused)
{
    struct list_head *ent;
    tpmif_t *tpmif;
    tpmif_tx_request_t *tx;

    DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);

    while (!list_empty(&tpm_schedule_list)) {
        /* Get a tpmif from the list with work to do. */
        ent = tpm_schedule_list.next;
        tpmif = list_entry(ent, tpmif_t, list);
        tpmif_get(tpmif);
        remove_from_tpm_schedule_list(tpmif);
        /*
         * Ensure that we see the request when we read from it.
         */
        mb();

        tx = &tpmif->tx->ring[0].req;

        /* pass it up */
        vtpm_receive(tpmif, tx->size);

        tpmif_put(tpmif);
    }
}

irqreturn_t
tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
    tpmif_t *tpmif = dev_id;
    add_to_tpm_schedule_list_tail(tpmif);
    maybe_schedule_tx_action();
    return IRQ_HANDLED;
}

static int __init
tpmback_init(void)
{
    int rc;

    if (!(xen_start_info->flags & SIF_TPM_BE_DOMAIN) &&
        !(xen_start_info->flags & SIF_INITDOMAIN)) {
        printk(KERN_ALERT "Neither TPM-BE Domain nor INIT domain!\n");
        return 0;
    }

    if ((rc = misc_register(&ibmvtpms_miscdevice)) != 0) {
        printk(KERN_ALERT "Could not register misc device for TPM BE.\n");
        return rc;
    }

    INIT_LIST_HEAD(&dataex.pending_pak);
    INIT_LIST_HEAD(&dataex.current_pak);
    dataex.has_opener = 0;
    rwlock_init(&dataex.pak_lock);
    init_waitqueue_head(&dataex.wait_queue);

    spin_lock_init(&tpm_schedule_list_lock);
    INIT_LIST_HEAD(&tpm_schedule_list);

    tpmif_interface_init();
    tpmif_xenbus_init();

    printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");

    return 0;
}

__initcall(tpmback_init);