Re: [Xen-devel] [PATCH] drivers/tpm-xen: Change vTPM shared page ABI
>>> On 10.12.12 at 21:00, Daniel De Graaf <dgdegra@xxxxxxxxxxxxx> wrote:
> This changes the vTPM shared page ABI from a copy of the Xen network
> interface to a single-page interface that better reflects the expected
> behavior of a TPM: only a single request packet can be sent at any given
> time, and every packet sent generates a single response packet. This
> protocol change should also increase efficiency as it avoids mapping and
> unmapping grants when possible.
Given

-#define TPMIF_TX_RING_SIZE 1

where was the problem? The old ring already permitted only a single
outstanding request at a time.
Also, the patch replaces the old interface with the new one - how is
that compatible with older implementations?
The positive aspect is that the new interface is no longer address-size
dependent - the old one's "unsigned long addr" field changes size between
32-bit and 64-bit guests, while the new one uses only fixed-width fields -
so mixed-size backend/frontend pairs can work together, which isn't the
case for the original one.
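
To make sure I read the intended handshake correctly, here is a toy
userspace model of the protocol (the struct is copied from the patch; a
direct function call stands in for the event channel notification and the
grant machinery, so this illustrates only the request/response cycle, not
actual driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct vtpm_shared_page {
    uint32_t length;         /* request/response length in bytes */
    uint8_t  state;          /* 0 - response ready / idle
                              * 1 - request ready / working */
    uint8_t  locality;       /* for the current request */
    uint8_t  pad;
    uint8_t  nr_extra_pages; /* extra pages for long packets; may be zero */
    uint32_t extra_pages[];  /* grant IDs; nr_extra_pages of them */
};

/* Stands in for the single granted page shared by front- and backend. */
static uint32_t page[PAGE_SIZE / sizeof(uint32_t)];

/* "Backend": consume the request in place, overwrite it with a response. */
static void backend_handle(struct vtpm_shared_page *shr)
{
    uint8_t *body = (uint8_t *)shr + sizeof(*shr) + 4 * shr->nr_extra_pages;
    printf("backend:  %u byte request:  %.*s\n",
           (unsigned)shr->length, (int)shr->length, (char *)body);

    const char response[] = "tpm-response";
    memcpy(body, response, sizeof(response));
    shr->length = sizeof(response);
    shr->state = 0;          /* response ready / idle again */
}

int main(void)
{
    struct vtpm_shared_page *shr = (struct vtpm_shared_page *)page;
    uint8_t *body = (uint8_t *)page + sizeof(*shr); /* nr_extra_pages == 0 */

    /* Frontend: at most one request is ever in flight. */
    const char request[] = "tpm-request";
    memcpy(body, request, sizeof(request));
    shr->length = sizeof(request);
    shr->state = 1;          /* request ready; the real code needs its
                              * write barrier before this store */
    backend_handle(shr);     /* stands in for notify_remote_via_evtchn() */

    /* The same page, never re-granted, now holds the response. */
    printf("frontend: %u byte response: %.*s\n",
           (unsigned)shr->length, (int)shr->length, (char *)body);
    return 0;
}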
Jan
> Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
> ---
> drivers/char/tpm/xen-tpmfront_if.c | 195 +++++++------------------------------
> include/xen/interface/io/tpmif.h | 42 ++------
> 2 files changed, 42 insertions(+), 195 deletions(-)
>
> diff --git a/drivers/char/tpm/xen-tpmfront_if.c b/drivers/char/tpm/xen-tpmfront_if.c
> index ba7fad8..374ad1b 100644
> --- a/drivers/char/tpm/xen-tpmfront_if.c
> +++ b/drivers/char/tpm/xen-tpmfront_if.c
> @@ -53,7 +53,7 @@
> struct tpm_private {
> struct tpm_chip *chip;
>
> - struct tpmif_tx_interface *tx;
> + struct vtpm_shared_page *page;
> atomic_t refcnt;
> unsigned int evtchn;
> u8 is_connected;
> @@ -61,8 +61,6 @@ struct tpm_private {
>
> spinlock_t tx_lock;
>
> - struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
> -
> atomic_t tx_busy;
> void *tx_remember;
>
> @@ -73,15 +71,7 @@ struct tpm_private {
> int ring_ref;
> };
>
> -struct tx_buffer {
> - unsigned int size; /* available space in data */
> - unsigned int len; /* used space in data */
> - unsigned char *data; /* pointer to a page */
> -};
> -
> -
> /* locally visible variables */
> -static grant_ref_t gref_head;
> static struct tpm_private *my_priv;
>
> /* local function prototypes */
> @@ -92,8 +82,6 @@ static int tpmif_connect(struct xenbus_device *dev,
> struct tpm_private *tp,
> domid_t domid);
> static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
> -static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
> -static void tpmif_free_tx_buffers(struct tpm_private *tp);
> static void tpmif_set_connected_state(struct tpm_private *tp,
> u8 newstate);
> static int tpm_xmit(struct tpm_private *tp,
> @@ -101,52 +89,6 @@ static int tpm_xmit(struct tpm_private *tp,
> void *remember);
> static void destroy_tpmring(struct tpm_private *tp);
>
> -static inline int
> -tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
> - int isuserbuffer)
> -{
> - int copied = len;
> -
> - if (len > txb->size)
> - copied = txb->size;
> - if (isuserbuffer) {
> - if (copy_from_user(txb->data, src, copied))
> - return -EFAULT;
> - } else {
> - memcpy(txb->data, src, copied);
> - }
> - txb->len = len;
> - return copied;
> -}
> -
> -static inline struct tx_buffer *tx_buffer_alloc(void)
> -{
> - struct tx_buffer *txb;
> -
> - txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
> - if (!txb)
> - return NULL;
> -
> - txb->len = 0;
> - txb->size = PAGE_SIZE;
> - txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
> - if (txb->data == NULL) {
> - kfree(txb);
> - txb = NULL;
> - }
> -
> - return txb;
> -}
> -
> -
> -static inline void tx_buffer_free(struct tx_buffer *txb)
> -{
> - if (txb) {
> - free_page((long)txb->data);
> - kfree(txb);
> - }
> -}
> -
> /**************************************************************
> Utility function for the tpm_private structure
> **************************************************************/
> @@ -162,15 +104,12 @@ static void tpm_private_put(void)
> if (!atomic_dec_and_test(&my_priv->refcnt))
> return;
>
> - tpmif_free_tx_buffers(my_priv);
> kfree(my_priv);
> my_priv = NULL;
> }
>
> static struct tpm_private *tpm_private_get(void)
> {
> - int err;
> -
> if (my_priv) {
> atomic_inc(&my_priv->refcnt);
> return my_priv;
> @@ -181,9 +120,6 @@ static struct tpm_private *tpm_private_get(void)
> return NULL;
>
> tpm_private_init(my_priv);
> - err = tpmif_allocate_tx_buffers(my_priv);
> - if (err < 0)
> - tpm_private_put();
>
> return my_priv;
> }
> @@ -218,22 +154,22 @@ int vtpm_vd_send(struct tpm_private *tp,
> static int setup_tpmring(struct xenbus_device *dev,
> struct tpm_private *tp)
> {
> - struct tpmif_tx_interface *sring;
> + struct vtpm_shared_page *sring;
> int err;
>
> tp->ring_ref = GRANT_INVALID_REF;
>
> - sring = (void *)__get_free_page(GFP_KERNEL);
> + sring = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
> if (!sring) {
> xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
> return -ENOMEM;
> }
> - tp->tx = sring;
> + tp->page = sring;
>
> - err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
> + err = xenbus_grant_ring(dev, virt_to_mfn(tp->page));
> if (err < 0) {
> free_page((unsigned long)sring);
> - tp->tx = NULL;
> + tp->page = NULL;
> xenbus_dev_fatal(dev, err, "allocating grant reference");
> goto fail;
> }
> @@ -256,9 +192,9 @@ static void destroy_tpmring(struct tpm_private *tp)
>
> if (tp->ring_ref != GRANT_INVALID_REF) {
> gnttab_end_foreign_access(tp->ring_ref,
> - 0, (unsigned long)tp->tx);
> + 0, (unsigned long)tp->page);
> tp->ring_ref = GRANT_INVALID_REF;
> - tp->tx = NULL;
> + tp->page = NULL;
> }
>
> if (tp->evtchn)
> @@ -457,10 +393,10 @@ static int tpmif_connect(struct xenbus_device *dev,
> }
>
> static const struct xenbus_device_id tpmfront_ids[] = {
> - { "vtpm" },
> + { "vtpm2" },
> { "" }
> };
> -MODULE_ALIAS("xen:vtpm");
> +MODULE_ALIAS("xen:vtpm2");
>
> static DEFINE_XENBUS_DRIVER(tpmfront, ,
> .probe = tpmfront_probe,
> @@ -470,62 +406,30 @@ static DEFINE_XENBUS_DRIVER(tpmfront, ,
> .suspend = tpmfront_suspend,
> );
>
> -static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
> -{
> - unsigned int i;
> -
> - for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
> - tp->tx_buffers[i] = tx_buffer_alloc();
> - if (!tp->tx_buffers[i]) {
> - tpmif_free_tx_buffers(tp);
> - return -ENOMEM;
> - }
> - }
> - return 0;
> -}
> -
> -static void tpmif_free_tx_buffers(struct tpm_private *tp)
> -{
> - unsigned int i;
> -
> - for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
> - tx_buffer_free(tp->tx_buffers[i]);
> -}
> -
> static void tpmif_rx_action(unsigned long priv)
> {
> struct tpm_private *tp = (struct tpm_private *)priv;
> - int i = 0;
> unsigned int received;
> unsigned int offset = 0;
> u8 *buffer;
> - struct tpmif_tx_request *tx = &tp->tx->ring[i].req;
> + struct vtpm_shared_page *shr = tp->page;
>
> atomic_set(&tp->tx_busy, 0);
> wake_up_interruptible(&tp->wait_q);
>
> - received = tx->size;
> + offset = sizeof(*shr) + 4*shr->nr_extra_pages;
> + received = shr->length;
> +
> + if (offset > PAGE_SIZE || offset + received > PAGE_SIZE) {
> + printk(KERN_WARNING "tpmif_rx_action packet too large\n");
> + return;
> + }
>
> buffer = kmalloc(received, GFP_ATOMIC);
> if (!buffer)
> return;
>
> - for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
> - struct tx_buffer *txb = tp->tx_buffers[i];
> - struct tpmif_tx_request *tx;
> - unsigned int tocopy;
> -
> - tx = &tp->tx->ring[i].req;
> - tocopy = tx->size;
> - if (tocopy > PAGE_SIZE)
> - tocopy = PAGE_SIZE;
> -
> - memcpy(&buffer[offset], txb->data, tocopy);
> -
> - gnttab_release_grant_reference(&gref_head, tx->ref);
> -
> - offset += tocopy;
> - }
> + memcpy(buffer, offset + (u8*)shr, received);
>
> vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
> kfree(buffer);
> @@ -550,8 +454,7 @@ static int tpm_xmit(struct tpm_private *tp,
> const u8 *buf, size_t count, int isuserbuffer,
> void *remember)
> {
> - struct tpmif_tx_request *tx;
> - int i;
> + struct vtpm_shared_page *shr;
> unsigned int offset = 0;
>
> spin_lock_irq(&tp->tx_lock);
> @@ -566,48 +469,23 @@ static int tpm_xmit(struct tpm_private *tp,
> return -EIO;
> }
>
> - for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
> - struct tx_buffer *txb = tp->tx_buffers[i];
> - int copied;
> -
> - if (!txb) {
> - spin_unlock_irq(&tp->tx_lock);
> - return -EFAULT;
> - }
> -
> - copied = tx_buffer_copy(txb, &buf[offset], count,
> - isuserbuffer);
> - if (copied < 0) {
> - /* An error occurred */
> - spin_unlock_irq(&tp->tx_lock);
> - return copied;
> - }
> - count -= copied;
> - offset += copied;
> -
> - tx = &tp->tx->ring[i].req;
> - tx->addr = virt_to_machine(txb->data).maddr;
> - tx->size = txb->len;
> - tx->unused = 0;
> -
> - /* Get the granttable reference for this page. */
> - tx->ref = gnttab_claim_grant_reference(&gref_head);
> - if (tx->ref == -ENOSPC) {
> - spin_unlock_irq(&tp->tx_lock);
> - return -ENOSPC;
> - }
> - gnttab_grant_foreign_access_ref(tx->ref,
> - tp->backend_id,
> - virt_to_mfn(txb->data),
> - 0 /*RW*/);
> - wmb();
> - }
> + shr = tp->page;
> + offset = sizeof(*shr) + 4*shr->nr_extra_pages;
> +
> + if (offset > PAGE_SIZE)
> + return -EIO;
> +
> + if (offset + count > PAGE_SIZE)
> + count = PAGE_SIZE - offset;
> +
> + memcpy(offset + (u8*)shr, buf, count);
> + shr->length = count;
> + barrier();
> + shr->state = 1;
>
> atomic_set(&tp->tx_busy, 1);
> tp->tx_remember = remember;
>
> - mb();
> -
> notify_remote_via_evtchn(tp->evtchn);
>
> spin_unlock_irq(&tp->tx_lock);
> @@ -667,12 +545,6 @@ static int __init tpmif_init(void)
> if (!tp)
> return -ENOMEM;
>
> - if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
> - &gref_head) < 0) {
> - tpm_private_put();
> - return -EFAULT;
> - }
> -
> return xenbus_register_frontend(&tpmfront_driver);
> }
> module_init(tpmif_init);
> @@ -680,7 +552,6 @@ module_init(tpmif_init);
> static void __exit tpmif_exit(void)
> {
> xenbus_unregister_driver(&tpmfront_driver);
> - gnttab_free_grant_references(gref_head);
> tpm_private_put();
> }
> module_exit(tpmif_exit);
> diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
> index c9e7294..b55ac56 100644
> --- a/include/xen/interface/io/tpmif.h
> +++ b/include/xen/interface/io/tpmif.h
> @@ -1,7 +1,7 @@
>
> /******************************************************************************
> * tpmif.h
> *
> - * TPM I/O interface for Xen guest OSes.
> + * TPM I/O interface for Xen guest OSes, v2
> *
> * Permission is hereby granted, free of charge, to any person obtaining a copy
> * of this software and associated documentation files (the "Software"), to
> @@ -21,45 +21,21 @@
> * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> * DEALINGS IN THE SOFTWARE.
> *
> - * Copyright (c) 2005, IBM Corporation
> - *
> - * Author: Stefan Berger, stefanb@xxxxxxxxxx
> - * Grant table support: Mahadevan Gomathisankaran
> - *
> - * This code has been derived from tools/libxc/xen/io/netif.h
> - *
> - * Copyright (c) 2003-2004, Keir Fraser
> */
>
> #ifndef __XEN_PUBLIC_IO_TPMIF_H__
> #define __XEN_PUBLIC_IO_TPMIF_H__
>
> -#include "../grant_table.h"
> -
> -struct tpmif_tx_request {
> - unsigned long addr; /* Machine address of packet. */
> - grant_ref_t ref; /* grant table access reference */
> - uint16_t unused;
> - uint16_t size; /* Packet size in bytes. */
> -};
> -struct tpmif_tx_request;
> +struct vtpm_shared_page {
> + uint32_t length; /* request/response length in bytes */
>
> -/*
> - * The TPMIF_TX_RING_SIZE defines the number of pages the
> - * front-end and backend can exchange (= size of array).
> - */
> -#define TPMIF_TX_RING_SIZE 1
> -
> -/* This structure must fit in a memory page. */
> -
> -struct tpmif_ring {
> - struct tpmif_tx_request req;
> -};
> -struct tpmif_ring;
> + uint8_t state; /* 0 - response ready / idle
> + * 1 - request ready / working */
> + uint8_t locality; /* for the current request */
> + uint8_t pad;
>
> -struct tpmif_tx_interface {
> - struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
> + uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
> + uint32_t extra_pages[0]; /* grant IDs; length is actually
> nr_extra_pages */
> };
> -struct tpmif_tx_interface;
>
> #endif
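
If I read tpmif_rx_action() and tpm_xmit() correctly, the in-page layout
implied by this struct is (offsets in bytes):

    0                       header: length, state, locality, pad, nr_extra_pages
    8                       extra_pages[0 .. nr_extra_pages-1], 4 bytes each
    8 + 4*nr_extra_pages    packet body ('length' bytes of it in this page)

which matches the sizeof(*shr) + 4*shr->nr_extra_pages offset computation
on both the receive and transmit paths.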
> --
> 1.7.11.7
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel