Re: [Xen-devel] [PATCH] replace bogus gdprintk() uses with {,d}printk()
On 15/02/2012 14:58, "Jan Beulich" <JBeulich@xxxxxxxx> wrote:
> When the subject domain is not the current one (e.g. during domctl or
> HVM save/restore handling), use of gdprintk() is questionable at best,
> as it won't give the intended information on what domain is affected.
> Use plain printk() or dprintk() instead, but keep things (mostly) as
> guest messages by using XENLOG_G_*.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
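
For anyone skimming the diff below, the pattern being applied throughout is roughly the following. This is an illustrative sketch only: the handler name is made up, and the Xen-internal types and macros (struct domain, printk, XENLOG_G_ERR) are assumed from the hypervisor headers, so it is not a standalone compilable unit.

/* Hypothetical handler illustrating the before/after logging pattern. */
static int example_load_handler(struct domain *d, unsigned int vcpuid)
{
    if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL )
    {
        /* Old style: gdprintk() annotates the message with 'current',
         * which during domctl or HVM restore handling is not the domain
         * being acted on, so the log points at the wrong guest:
         *   gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n",
         *            vcpuid);
         */

        /* New style: plain printk() at a guest log level (XENLOG_G_*),
         * naming the subject domain explicitly. */
        printk(XENLOG_G_ERR "HVM restore: dom%d has no vcpu%u\n",
               d->domain_id, vcpuid);
        return -EINVAL;
    }
    return 0;
}
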
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -783,7 +783,8 @@ long arch_do_domctl(
> spin_unlock(&pcidevs_lock);
> }
> if ( ret < 0 )
> - gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
> + printk(XENLOG_G_ERR "pt_irq_create_bind failed (%ld) for dom%d\n",
> + ret, d->domain_id);
>
> bind_out:
> rcu_unlock_domain(d);
> @@ -812,7 +813,8 @@ long arch_do_domctl(
> spin_unlock(&pcidevs_lock);
> }
> if ( ret < 0 )
> - gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
> + printk(XENLOG_G_ERR "pt_irq_destroy_bind failed (%ld) for dom%d\n",
> + ret, d->domain_id);
>
> unbind_out:
> rcu_unlock_domain(d);
> @@ -849,9 +851,9 @@ long arch_do_domctl(
>
> if ( add )
> {
> - gdprintk(XENLOG_INFO,
> - "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
> - gfn, mfn, nr_mfns);
> + printk(XENLOG_G_INFO
> + "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> + d->domain_id, gfn, mfn, nr_mfns);
>
> ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
> for ( i = 0; i < nr_mfns; i++ )
> @@ -859,9 +861,9 @@ long arch_do_domctl(
> }
> else
> {
> - gdprintk(XENLOG_INFO,
> - "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
> - gfn, mfn, nr_mfns);
> + printk(XENLOG_G_INFO
> + "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> + d->domain_id, gfn, mfn, nr_mfns);
>
> for ( i = 0; i < nr_mfns; i++ )
> clear_mmio_p2m_entry(d, gfn+i);
> @@ -888,9 +890,9 @@ long arch_do_domctl(
> if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
> ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
> {
> - gdprintk(XENLOG_ERR,
> - "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
> - fgp, fmp, np);
> + printk(XENLOG_G_ERR
> + "ioport_map:invalid:dom%d gport=%x mport=%x nr=%x\n",
> + domctl->domain, fgp, fmp, np);
> break;
> }
>
> @@ -912,9 +914,9 @@ long arch_do_domctl(
> hd = domain_hvm_iommu(d);
> if ( add )
> {
> - gdprintk(XENLOG_INFO,
> - "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
> - fgp, fmp, np);
> + printk(XENLOG_G_INFO
> + "ioport_map:add: dom%d gport=%x mport=%x nr=%x\n",
> + d->domain_id, fgp, fmp, np);
>
> list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
> if (g2m_ioport->mport == fmp )
> @@ -936,9 +938,9 @@ long arch_do_domctl(
> }
> else
> {
> - gdprintk(XENLOG_INFO,
> - "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
> - fgp, fmp, np);
> + printk(XENLOG_G_INFO
> + "ioport_map:remove: dom%d gport=%x mport=%x nr=%x\n",
> + d->domain_id, fgp, fmp, np);
> list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
> if ( g2m_ioport->mport == fmp )
> {
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -681,7 +681,8 @@ static int hvm_load_cpu_ctxt(struct doma
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%u has no vcpu%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
>
> @@ -693,15 +694,15 @@ static int hvm_load_cpu_ctxt(struct doma
> !(ctxt.cr0 & X86_CR0_ET) ||
> ((ctxt.cr0 & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG) )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: bad CR0 0x%"PRIx64"\n",
> - ctxt.cr0);
> + printk(XENLOG_G_ERR "HVM%d restore: bad CR0 %#" PRIx64 "\n",
> + d->domain_id, ctxt.cr0);
> return -EINVAL;
> }
>
> if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS(v) )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n",
> - ctxt.cr4);
> + printk(XENLOG_G_ERR "HVM%d restore: bad CR4 %#" PRIx64 "\n",
> + d->domain_id, ctxt.cr4);
> return -EINVAL;
> }
>
> @@ -709,8 +710,8 @@ static int hvm_load_cpu_ctxt(struct doma
> | EFER_NX | EFER_SCE;
> if ( !hvm_efer_valid(d, ctxt.msr_efer, efer_validbits) )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
> - ctxt.msr_efer);
> + printk(XENLOG_G_ERR "HVM%d restore: bad EFER %#" PRIx64 "\n",
> + d->domain_id, ctxt.msr_efer);
> return -EINVAL;
> }
>
> @@ -889,7 +890,8 @@ static int hvm_load_cpu_xsave_states(str
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
>
> @@ -901,25 +903,25 @@ static int hvm_load_cpu_xsave_states(str
> desc = (struct hvm_save_descriptor *)&h->data[h->cur];
> if ( sizeof (*desc) > h->size - h->cur)
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM restore: not enough data left to read descriptpr"
> - "for type %u\n", CPU_XSAVE_CODE);
> + printk(XENLOG_G_WARNING
> + "HVM%d restore: not enough data left to read descriptor"
> + "for type %u\n", d->domain_id, CPU_XSAVE_CODE);
> return -1;
> }
> if ( desc->length + sizeof (*desc) > h->size - h->cur)
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM restore: not enough data left to read %u bytes "
> - "for type %u\n", desc->length, CPU_XSAVE_CODE);
> + printk(XENLOG_G_WARNING
> + "HVM%d restore: not enough data left to read %u bytes "
> + "for type %u\n", d->domain_id, desc->length, CPU_XSAVE_CODE);
> return -1;
> }
> if ( CPU_XSAVE_CODE != desc->typecode || (desc->length > HVM_CPU_XSAVE_SIZE) )
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM restore mismatch: expected type %u with max length %u, "
> - "saw type %u length %u\n", CPU_XSAVE_CODE,
> - (uint32_t)HVM_CPU_XSAVE_SIZE,
> - desc->typecode, desc->length);
> + printk(XENLOG_G_WARNING
> + "HVM%d restore mismatch: expected type %u with max length %u, "
> + "saw type %u length %u\n", d->domain_id, CPU_XSAVE_CODE,
> + (unsigned int)HVM_CPU_XSAVE_SIZE,
> + desc->typecode, desc->length);
> return -1;
> }
> h->cur += sizeof (*desc);
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -667,7 +667,8 @@ static int hvm_load_mtrr_msr(struct doma
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
>
> --- a/xen/arch/x86/hvm/save.c
> +++ b/xen/arch/x86/hvm/save.c
> @@ -42,24 +42,24 @@ int arch_hvm_load(struct domain *d, stru
>
> if ( hdr->magic != HVM_FILE_MAGIC )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM restore: bad magic number %#"PRIx32"\n", hdr->magic);
> + printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
> + d->domain_id, hdr->magic);
> return -1;
> }
>
> if ( hdr->version != HVM_FILE_VERSION )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM restore: unsupported version %u\n", hdr->version);
> + printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
> + d->domain_id, hdr->version);
> return -1;
> }
>
> cpuid(1, &eax, &ebx, &ecx, &edx);
> /* CPUs ought to match but with feature-masking they might not */
> if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) )
> - gdprintk(XENLOG_INFO, "HVM restore (%u): VM saved on one CPU "
> - "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
> - d->domain_id, hdr->cpuid, eax);
> + printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU "
> + "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
> + d->domain_id, hdr->cpuid, eax);
>
> /* Restore guest's preferred TSC frequency. */
> if ( hdr->gtsc_khz )
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -448,7 +448,8 @@ static int viridian_load_vcpu_ctxt(struc
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
>
> --- a/xen/arch/x86/hvm/vlapic.c
> +++ b/xen/arch/x86/hvm/vlapic.c
> @@ -1136,7 +1136,8 @@ static int lapic_load_hidden(struct doma
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no apic%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
> s = vcpu_vlapic(v);
> @@ -1159,7 +1160,8 @@ static int lapic_load_regs(struct domain
> vcpuid = hvm_load_instance(h);
> if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
> {
> - gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
> + dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no apic%u\n",
> + d->domain_id, vcpuid);
> return -EINVAL;
> }
> s = vcpu_vlapic(v);
> --- a/xen/arch/x86/irq.c
> +++ b/xen/arch/x86/irq.c
> @@ -1517,9 +1517,9 @@ int pirq_guest_bind(struct vcpu *v, stru
> {
> if ( desc->action != NULL )
> {
> - gdprintk(XENLOG_INFO,
> - "Cannot bind IRQ %d to guest. In use by '%s'.\n",
> - pirq->pirq, desc->action->name);
> + printk(XENLOG_G_INFO
> + "Cannot bind IRQ%d to dom%d. In use by '%s'.\n",
> + pirq->pirq, v->domain->domain_id, desc->action->name);
> rc = -EBUSY;
> goto unlock_out;
> }
> @@ -1531,9 +1531,9 @@ int pirq_guest_bind(struct vcpu *v, stru
> zalloc_cpumask_var(&newaction->cpu_eoi_map) )
> goto retry;
> xfree(newaction);
> - gdprintk(XENLOG_INFO,
> - "Cannot bind IRQ %d to guest. Out of memory.\n",
> - pirq->pirq);
> + printk(XENLOG_G_INFO
> + "Cannot bind IRQ%d to dom%d. Out of memory.\n",
> + pirq->pirq, v->domain->domain_id);
> rc = -ENOMEM;
> goto out;
> }
> @@ -1558,11 +1558,10 @@ int pirq_guest_bind(struct vcpu *v, stru
> }
> else if ( !will_share || !action->shareable )
> {
> - gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. %s.\n",
> - pirq->pirq,
> - will_share ?
> - "Others do not share" :
> - "Will not share with others");
> + printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. %s.\n",
> + pirq->pirq, v->domain->domain_id,
> + will_share ? "Others do not share"
> + : "Will not share with others");
> rc = -EBUSY;
> goto unlock_out;
> }
> @@ -1581,8 +1580,9 @@ int pirq_guest_bind(struct vcpu *v, stru
>
> if ( action->nr_guests == IRQ_MAX_GUESTS )
> {
> - gdprintk(XENLOG_INFO, "Cannot bind IRQ %d to guest. "
> - "Already at max share.\n", pirq->pirq);
> + printk(XENLOG_G_INFO "Cannot bind IRQ%d to dom%d. "
> + "Already at max share.\n",
> + pirq->pirq, v->domain->domain_id);
> rc = -EBUSY;
> goto unlock_out;
> }
> --- a/xen/arch/x86/oprofile/op_model_ppro.c
> +++ b/xen/arch/x86/oprofile/op_model_ppro.c
> @@ -235,10 +235,10 @@ static int ppro_allocate_msr(struct vcpu
> vpmu_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
> return 1;
> out:
> - gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile, oprofile is "
> - "unavailable on domain %d vcpu %d.\n",
> - v->vcpu_id, v->domain->domain_id);
> - return 0;
> + printk(XENLOG_G_WARNING "Insufficient memory for oprofile,"
> + " oprofile is unavailable on dom%d vcpu%d\n",
> + v->vcpu_id, v->domain->domain_id);
> + return 0;
> }
>
> static void ppro_free_msr(struct vcpu *v)
> --- a/xen/arch/x86/time.c
> +++ b/xen/arch/x86/time.c
> @@ -945,8 +945,8 @@ int cpu_frequency_change(u64 freq)
> /* Sanity check: CPU frequency allegedly dropping below 1MHz? */
> if ( freq < 1000000u )
> {
> - gdprintk(XENLOG_WARNING, "Rejecting CPU frequency change "
> - "to %"PRIu64" Hz.\n", freq);
> + printk(XENLOG_WARNING "Rejecting CPU frequency change "
> + "to %"PRIu64" Hz\n", freq);
> return -EINVAL;
> }
>
> --- a/xen/common/hvm/save.c
> +++ b/xen/common/hvm/save.c
> @@ -108,8 +108,8 @@ int hvm_save_one(struct domain *d, uint1
>
> if ( hvm_sr_handlers[typecode].save(d, &ctxt) != 0 )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM save: failed to save type %"PRIu16"\n", typecode);
> + printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16"\n",
> + d->domain_id, typecode);
> rv = -EFAULT;
> }
> else if ( copy_to_guest(handle,
> @@ -149,7 +149,8 @@ int hvm_save(struct domain *d, hvm_domai
>
> if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
> {
> - gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
> + printk(XENLOG_G_ERR "HVM%d save: failed to write header\n",
> + d->domain_id);
> return -EFAULT;
> }
>
> @@ -159,11 +160,13 @@ int hvm_save(struct domain *d, hvm_domai
> handler = hvm_sr_handlers[i].save;
> if ( handler != NULL )
> {
> - gdprintk(XENLOG_INFO, "HVM save: %s\n", hvm_sr_handlers[i].name);
> + printk(XENLOG_G_INFO "HVM%d save: %s\n",
> + d->domain_id, hvm_sr_handlers[i].name);
> if ( handler(d, h) != 0 )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM save: failed to save type %"PRIu16"\n", i);
> + printk(XENLOG_G_ERR
> + "HVM%d save: failed to save type %"PRIu16"\n",
> + d->domain_id, i);
> return -EFAULT;
> }
> }
> @@ -173,7 +176,8 @@ int hvm_save(struct domain *d, hvm_domai
> if ( hvm_save_entry(END, 0, h, &end) != 0 )
> {
> /* Run out of data */
> - gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
> + printk(XENLOG_G_ERR "HVM%d save: no room for end marker\n",
> + d->domain_id);
> return -EFAULT;
> }
>
> @@ -209,8 +213,9 @@ int hvm_load(struct domain *d, hvm_domai
> if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
> {
> /* Run out of data */
> - gdprintk(XENLOG_ERR,
> - "HVM restore: save did not end with a null entry\n");
> + printk(XENLOG_G_ERR
> + "HVM%d restore: save did not end with a null entry\n",
> + d->domain_id);
> return -1;
> }
>
> @@ -223,20 +228,18 @@ int hvm_load(struct domain *d, hvm_domai
> if ( (desc->typecode > HVM_SAVE_CODE_MAX) ||
> ((handler = hvm_sr_handlers[desc->typecode].load) == NULL) )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM restore: unknown entry typecode %u\n",
> - desc->typecode);
> + printk(XENLOG_G_ERR "HVM%d restore: unknown entry typecode %u\n",
> + d->domain_id, desc->typecode);
> return -1;
> }
>
> /* Load the entry */
> - gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",
> - hvm_sr_handlers[desc->typecode].name, desc->instance);
> + printk(XENLOG_G_INFO "HVM%d restore: %s %"PRIu16"\n", d->domain_id,
> + hvm_sr_handlers[desc->typecode].name, desc->instance);
> if ( handler(d, h) != 0 )
> {
> - gdprintk(XENLOG_ERR,
> - "HVM restore: failed to load entry %u/%u\n",
> - desc->typecode, desc->instance);
> + printk(XENLOG_G_ERR "HVM%d restore: failed to load entry %u/%u\n",
> + d->domain_id, desc->typecode, desc->instance);
> return -1;
> }
> }
> @@ -251,10 +254,9 @@ int _hvm_init_entry(struct hvm_domain_co
> = (struct hvm_save_descriptor *)&h->data[h->cur];
> if ( h->size - h->cur < len + sizeof (*d) )
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM save: no room for %"PRIu32" + %u bytes "
> - "for typecode %"PRIu16"\n",
> - len, (unsigned) sizeof (*d), tc);
> + printk(XENLOG_G_WARNING "HVM save: no room for"
> + " %"PRIu32" + %zu bytes for typecode %"PRIu16"\n",
> + len, sizeof(*d), tc);
> return -1;
> }
> d->typecode = tc;
> @@ -278,17 +280,17 @@ int _hvm_check_entry(struct hvm_domain_c
> = (struct hvm_save_descriptor *)&h->data[h->cur];
> if ( len + sizeof (*d) > h->size - h->cur)
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM restore: not enough data left to read %u bytes "
> - "for type %u\n", len, type);
> + printk(XENLOG_G_WARNING
> + "HVM restore: not enough data left to read %u bytes "
> + "for type %u\n", len, type);
> return -1;
> }
> if ( (type != d->typecode) || (len < d->length) ||
> (strict_length && (len != d->length)) )
> {
> - gdprintk(XENLOG_WARNING,
> - "HVM restore mismatch: expected type %u length %u, "
> - "saw type %u length %u\n", type, len, d->typecode, d->length);
> + printk(XENLOG_G_WARNING
> + "HVM restore mismatch: expected type %u length %u, "
> + "saw type %u length %u\n", type, len, d->typecode, d->length);
> return -1;
> }
> h->cur += sizeof(*d);
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -299,8 +299,6 @@ long do_sysctl(XEN_GUEST_HANDLE(xen_sysc
> ret = query_page_offline(pfn, ptr++);
> break;
> default:
> - gdprintk(XENLOG_WARNING, "invalid page offline op %x\n",
> - op->u.page_offline.cmd);
> ret = -EINVAL;
> break;
> }
> --- a/xen/common/xenoprof.c
> +++ b/xen/common/xenoprof.c
> @@ -144,8 +144,8 @@ share_xenoprof_page_with_guest(struct do
> struct page_info *page = mfn_to_page(mfn + i);
> if ( (page->count_info & (PGC_allocated|PGC_count_mask)) != 0 )
> {
> - gdprintk(XENLOG_INFO, "mfn 0x%lx page->count_info 0x%lx\n",
> - mfn + i, (unsigned long)page->count_info);
> + printk(XENLOG_G_INFO "dom%d mfn %#lx page->count_info %#lx\n",
> + d->domain_id, mfn + i, page->count_info);
> return -EBUSY;
> }
> page_set_owner(page, NULL);
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -566,9 +566,9 @@ int iommu_do_domctl(
>
> if ( device_assigned(seg, bus, devfn) )
> {
> - gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
> - "%04x:%02x:%02x.%u already assigned, or non-existent\n",
> - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> + printk(XENLOG_G_INFO
> + "%04x:%02x:%02x.%u already assigned, or non-existent\n",
> + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> ret = -EINVAL;
> }
> break;
> @@ -576,8 +576,8 @@ int iommu_do_domctl(
> case XEN_DOMCTL_assign_device:
> if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
> {
> - gdprintk(XENLOG_ERR,
> - "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
> + printk(XENLOG_G_ERR
> + "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
> ret = -EINVAL;
> break;
> }
> @@ -593,9 +593,9 @@ int iommu_do_domctl(
> #ifdef __ia64__ /* XXX Is this really needed? */
> if ( device_assigned(seg, bus, devfn) )
> {
> - gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
> - "%x:%x.%x already assigned, or non-existent\n",
> - bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> + printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
> + "%04x:%02x:%02x.%u already assigned, or non-existent\n",
> + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> ret = -EINVAL;
> goto assign_device_out;
> }
> @@ -603,9 +603,10 @@ int iommu_do_domctl(
>
> ret = assign_device(d, seg, bus, devfn);
> if ( ret )
> - gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
> - "assign device (%04x:%02x:%02x.%u) failed\n",
> - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> + printk(XENLOG_G_ERR "XEN_DOMCTL_assign_device: "
> + "assign %04x:%02x:%02x.%u to dom%d failed (%d)\n",
> + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
> + d->domain_id, ret);
>
> assign_device_out:
> put_domain(d);
> @@ -614,8 +615,8 @@ int iommu_do_domctl(
> case XEN_DOMCTL_deassign_device:
> if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
> {
> - gdprintk(XENLOG_ERR,
> - "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
> + printk(XENLOG_G_ERR
> + "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
> ret = -EINVAL;
> break;
> }
> @@ -640,9 +641,10 @@ int iommu_do_domctl(
> ret = deassign_device(d, seg, bus, devfn);
> spin_unlock(&pcidevs_lock);
> if ( ret )
> - gdprintk(XENLOG_ERR, "XEN_DOMCTL_deassign_device: "
> - "deassign device (%04x:%02x:%02x.%u) failed\n",
> - seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> + printk(XENLOG_G_ERR
> + "deassign %04x:%02x:%02x.%u from dom%d failed (%d)\n",
> + seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
> + d->domain_id, ret);
>
> deassign_device_out:
> put_domain(d);
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel