
Re: [Xen-devel] [PATCH 4/4] stubdom/vtpm: Support multiple backends and locality



On 11/27/2012 10:14 AM, Daniel De Graaf wrote:
The vTPM protocol now contains a field allowing the locality of a
command to be specified; pass this to the TPM when processing a packet.
This also enables a single vTPM to provide multiple tpmback interfaces
so that several closely related domains can share a vTPM (for example, a
qemu device stubdom and its target domain).

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
  stubdom/tpmemu-0.7.4.patch | 61 ++++++++++++++++++++++++++++++++++++++++++----
  stubdom/vtpm/vtpm.c        | 16 +++---------
  2 files changed, 59 insertions(+), 18 deletions(-)

diff --git a/stubdom/tpmemu-0.7.4.patch b/stubdom/tpmemu-0.7.4.patch
index b84eff1..31ace1a 100644
--- a/stubdom/tpmemu-0.7.4.patch
+++ b/stubdom/tpmemu-0.7.4.patch
@@ -1,9 +1,60 @@
-diff -Naur tpm_emulator-x86_64-back/tpm/tpm_emulator_extern.c tpm_emulator-x86_64/tpm/tpm_emulator_extern.c
---- tpm_emulator-x86_64-back/tpm/tpm_emulator_extern.c 2012-04-27 10:55:46.581963398 -0400
-+++ tpm_emulator-x86_64/tpm/tpm_emulator_extern.c      2012-04-27 10:56:02.193034152 -0400
-@@ -249,7 +249,7 @@
+diff --git a/tpm/tpm_capability.c b/tpm/tpm_capability.c
+index 60bbb90..f8f7f0f 100644
+--- a/tpm/tpm_capability.c
++++ b/tpm/tpm_capability.c
+@@ -949,6 +949,8 @@ static TPM_RESULT set_vendor(UINT32 subCap, BYTE *setValue,
+                              UINT32 setValueSize, BOOL ownerAuth,
+                              BOOL deactivated, BOOL disabled)
+ {
++  if (tpmData.stany.flags.localityModifier != 8)
++    return TPM_BAD_PARAMETER;
+   /* set the capability area with the specified data, on failure
+      deactivate the TPM */
+   switch (subCap) {
+diff --git a/tpm/tpm_cmd_handler.c b/tpm/tpm_cmd_handler.c
+index 288d1ce..9e1cfb4 100644
+--- a/tpm/tpm_cmd_handler.c
++++ b/tpm/tpm_cmd_handler.c
+@@ -4132,7 +4132,7 @@ void tpm_emulator_shutdown()
+   tpm_extern_release();
+ }
+
+-int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size)
++int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size, int locality)
+ {
+   TPM_REQUEST req;
+   TPM_RESPONSE rsp;
+@@ -4140,7 +4140,9 @@ int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint3
+   UINT32 len;
+   BOOL free_out;
+
+-  debug("tpm_handle_command()");
++  debug("tpm_handle_command(%d)", locality);
++  if (locality != -1)
++    tpmData.stany.flags.localityModifier = locality;
+
+   /* we need the whole packet at once, otherwise unmarshalling will fail */
+   if (tpm_unmarshal_TPM_REQUEST((uint8_t**)&in, &in_size, &req) != 0) {
+diff --git a/tpm/tpm_emulator.h b/tpm/tpm_emulator.h
+index eed749e..4c228bd 100644
+--- a/tpm/tpm_emulator.h
++++ b/tpm/tpm_emulator.h
+@@ -59,7 +59,7 @@ void tpm_emulator_shutdown(void);
+  * its usage. In case of an error, all internally allocated memory
+  * is released and the the state of out and out_size is unspecified.
+  */
+-int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size);
++int tpm_handle_command(const uint8_t *in, uint32_t in_size, uint8_t **out, uint32_t *out_size, int locality);
+
+ #endif /* _TPM_EMULATOR_H_ */
+
+diff --git a/tpm/tpm_emulator_extern.c b/tpm/tpm_emulator_extern.c
+index aabe6c3..440a01b 100644
+--- a/tpm/tpm_emulator_extern.c
++++ b/tpm/tpm_emulator_extern.c
+@@ -249,7 +249,7 @@ int (*tpm_read_from_storage)(uint8_t **data, size_t *data_length) = _tpm_read_fr
   #else /* TPM_NO_EXTERN */
-
+
   int (*tpm_extern_init)(void)                                      = NULL;
  -int (*tpm_extern_release)(void)                                   = NULL;
  +void (*tpm_extern_release)(void)                                   = NULL;
diff --git a/stubdom/vtpm/vtpm.c b/stubdom/vtpm/vtpm.c
index c33e078..dcfc3b9 100644
--- a/stubdom/vtpm/vtpm.c
+++ b/stubdom/vtpm/vtpm.c
@@ -141,8 +141,6 @@ int check_ordinal(tpmcmd_t* tpmcmd) {
static void main_loop(void) {
     tpmcmd_t* tpmcmd = NULL;
-   domid_t domid;              /* Domid of frontend */
-   unsigned int handle;        /* handle of frontend */
     int res = -1;
info("VTPM Initializing\n");
@@ -162,15 +160,7 @@ static void main_loop(void) {
        goto abort_postpcrs;
     }
- /* Wait for the frontend domain to connect */
-   info("Waiting for frontend domain to connect..");
-   if(tpmback_wait_for_frontend_connect(&domid, &handle) == 0) {
-      info("VTPM attached to Frontend %u/%u", (unsigned int) domid, handle);
-   } else {
-      error("Unable to attach to a frontend");
-   }
-
-   tpmcmd = tpmback_req(domid, handle);
+   tpmcmd = tpmback_req_any();
     while(tpmcmd) {
        /* Handle the request */
        if(tpmcmd->req_len) {
@@ -183,7 +173,7 @@ static void main_loop(void) {
           }
           /* If not disabled, do the command */
           else {
-            if((res = tpm_handle_command(tpmcmd->req, tpmcmd->req_len, &tpmcmd->resp, &tpmcmd->resp_len)) != 0) {
+            if((res = tpm_handle_command(tpmcmd->req, tpmcmd->req_len, &tpmcmd->resp, &tpmcmd->resp_len, tpmcmd->locality)) != 0) {
                 error("tpm_handle_command() failed");
                 create_error_response(tpmcmd, TPM_FAIL);
              }
@@ -194,7 +184,7 @@ static void main_loop(void) {
        tpmback_resp(tpmcmd);
/* Wait for the next request */
-      tpmcmd = tpmback_req(domid, handle);
+      tpmcmd = tpmback_req_any();
}

Previously, the vtpm would shut down on its own when the host domain disconnected, because tpmback_req() returns NULL once the frontend disconnects. With tpmback_req_any() that is no longer the case, so the user now has to shut the vtpm down manually. How are you handling vtpm shutdown on your end?
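
To make the question concrete, here is a rough sketch of the kind of loop I have in mind. The shutdown_requested flag is purely hypothetical; something outside the loop would have to set it, since tpmback_req_any() no longer signals a frontend disconnect by returning NULL:

/* Hypothetical sketch, not part of this patch: with tpmback_req_any() the
 * loop below only terminates on an explicit request, so something has to
 * set shutdown_requested (a made-up flag) for the vtpm to exit cleanly. */
static volatile int shutdown_requested = 0;

static void main_loop(void) {
   tpmcmd_t* tpmcmd = tpmback_req_any();

   while(tpmcmd && !shutdown_requested) {
      /* ... handle the request exactly as in the patch ... */
      tpmback_resp(tpmcmd);

      /* Wait for the next request */
      tpmcmd = tpmback_req_any();
   }
}

Since tpmback_req_any() presumably blocks until the next request arrives, such a flag would only be checked between commands, which is part of why I'm asking how you intend to trigger shutdown.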

