diff -r 1af0183813dd -r 9fd760c9e92c linux-2.6-xen-sparse/include/asm-xen/xenidc.h --- /dev/null Fri Nov 25 18:44:13 2005 +++ b/linux-2.6-xen-sparse/include/asm-xen/xenidc.h Fri Nov 25 18:50:14 2005 @@ -0,0 +1,164 @@ +/*****************************************************************************/ +/* Xen inter-domain communication API. */ +/* */ +/* Copyright (c) 2005 Harry Butterworth IBM Corporation */ +/* */ +/* This program is free software; you can redistribute it and/or modify it */ +/* under the terms of the GNU General Public License as published by the */ +/* Free Software Foundation; either version 2 of the License, or (at your */ +/* option) any later version. */ +/* */ +/* This program is distributed in the hope that it will be useful, but */ +/* WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General */ +/* Public License for more details. */ +/* */ +/* You should have received a copy of the GNU General Public License along */ +/* with this program; if not, write to the Free Software Foundation, Inc., */ +/* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* */ +/*****************************************************************************/ + +#ifndef XENIDC_H +#define XENIDC_H + +/*****************************************************************************/ +/* The xenidc code provides two main facilities: an interdomain */ +/* communication 'endpoint' for use in sending messages and transactions */ +/* between domains and 'remote buffer reference' 'provider' and 'mapper' */ +/* pools for use in managing bulk-data transfer between domains. */ +/* The xenidc code also provides local buffer references which are a */ +/* convenient abstraction for local buffers and some ways of accessing them. */ +/* */ +/* The intended use is that the client would allocate an endpoint per device */ +/* in the FE and BE and would configure them to connect to each other. 
The */
+/* client would define a device-specific protocol consisting of messages and */
+/* transactions and would use this protocol over the transport provided by   */
+/* the endpoint to connect the FE and BE device objects together.            */
+/*                                                                           */
+/* The device specific protocol can contain remote buffer references which   */
+/* describe buffers for bulk data transfer. The client initiator uses a      */
+/* remote buffer reference provider to create remote buffer references which */
+/* refer to buffers in its local domain. The client initiator passes the     */
+/* remote buffer references in transaction parameters to the client target   */
+/* in a remote domain. The client target uses a remote buffer reference      */
+/* mapper pool to map the buffers referenced by the remote buffer references */
+/* into its local memory. The client target can then access the data in the  */
+/* buffers.                                                                  */
+/*                                                                           */
+/* A NOTE ON RESOURCE MANAGEMENT:                                            */
+/*                                                                           */
+/* The bulk data transfer mechanism uses the following strategy for resource */
+/* management:                                                               */
+/*                                                                           */
+/* 1) Create an anti-deadlock resource pool. This pool of resources is used  */
+/* to guarantee that your subsystem can make progress independent of all     */
+/* other subsystems using the xenidc API. The subsystem operations using     */
+/* the resource pool must be independent and the resource pool must contain  */
+/* enough resources to service the worst case individual operation. This     */
+/* guarantees progress because, in the worst case, an operation can wait for */
+/* all other operations to complete (they are independent so they will       */
+/* complete eventually) at which time the resource pool will be replenished  */
+/* such that it contains enough resources to start the waiting operation.    */
+/*                                                                           */
+/* 2) Make a single atomic request to reserve and create/reserve and open    */
+/* all of the local/remote buffers required for the operation to proceed to  */
+/* completion. 
This request is guaranteed to complete because it is atomic */ +/* and so will eventually get a chance to acquire all the resources it needs */ +/* which by design will be less than the anti-deadlock reservation. */ +/* */ +/* 3) Dependent operations using independent anti-deadlock pools acquire */ +/* resources from pools in a defined order. */ +/* */ +/* An alternative implementation could support an additional API for the */ +/* following strategy which might be more convenient for some clients: */ +/* */ +/* 1) Create an anti-deadlock resource pool as above. */ +/* */ +/* 2) Make a single atomic reservation to reserve all the resources required */ +/* for an operation to proceed to completion. This request is guaranteed to */ +/* complete because it is atomic and so will eventually get a chance to */ +/* acquire all the resources it needs which by design will be less than the */ +/* anti-deadlock reservation. */ +/* */ +/* 3) Make an arbitrary number of create/open requests using the resources */ +/* reserved in step 2. These are guaranteed to complete because the */ +/* resources reserved in step 2 are sufficient by design. */ +/* */ +/* 4) Dependent operations using independent anti-deadlock pools acquire */ +/* resources from pools in a defined order. */ +/* */ +/* The following strategies are examples which in general DO NOT WORK: */ +/* */ +/* First incorrect strategy: */ +/* */ +/* 1) Have no anti-deadlock resource pool. */ +/* */ +/* This doesn't work because all the xenidc resources might be allocated to */ +/* an operation that is dependent on your subsystem for completion. A */ +/* deadlock results where your subsystem is waiting on xenidc which is */ +/* waiting on the client of your subsystem to free resources which is */ +/* waiting on your subsystem. */ +/* */ +/* Second incorrect strategy: */ +/* */ +/* 1) Create an anti-deadlock resource pool. 
*/ +/* */ +/* 2) Make an arbitrary number of non-atomic reserve_and_create and */ +/* reserve_and_open requests for a single subsystem operation. */ +/* */ +/* In general, this doesn't work because your subsystem will be processing */ +/* multiple operations concurrently which means that the anti-deadlock */ +/* resources might get fully used by multiple concurrent operations in such */ +/* a way that none of the operations have enough resources to proceed so */ +/* they all end up waiting indefinitely on each other. */ +/* */ +/* Third incorrect strategy: */ +/* */ +/* 1) Create an anti-deadlock resource pool. */ +/* */ +/* 2) Use the same anti-deadlock pool for dependent operations. */ +/* */ +/* This doesn't work because an operation might acquire all the */ +/* anti-deadlock reservation and then initiate a second operation. The */ +/* second operation goes to the same anti-deadlock pool and waits forever */ +/* for the dependent operation to free the resources back. The dependent */ +/* operation never frees the resources because it is dependent on the second */ +/* operation. */ +/* */ +/* Fourth incorrect strategy: */ +/* */ +/* 1) Create anti-deadlock resource pools for different dependent */ +/* operations. */ +/* */ +/* 2) Different operations acquire resources from the pools in different */ +/* order. */ +/* */ +/* This doesn't work because of the potential for another cyclic deadlock. */ +/* */ +/*****************************************************************************/ + +/* Here's the endpoint: */ +#include "xenidc_endpoint.h" + +/* Here's the remote buffer reference provider pool: */ +#include "xenidc_rbr_provider_pool.h" + +/* Here's the remote buffer reference mapper pool: */ +#include "xenidc_rbr_mapper_pool.h" + +/* Here's a convenient way of concatenating buffers referred to by local */ +/* buffer references. 
*/
+#include "xenidc_concatenate.h"
+
+/* Here's a convenient way of accessing a buffer referred to by a local      */
+/* buffer reference as a wrapping buffer.                                    */
+#include "xenidc_wrapping.h"
+
+/* Here's a local buffer reference type for kernel virtual address space.    */
+#include "xenidc_vaddress.h"
+
+/* Here's a remote buffer reference type based on the grant tables API.      */
+#include "xenidc_grant_table.h"
+
+#endif