[Xen-devel] [qemu-mainline baseline-only test] 71581: regressions - trouble: blocked/broken/fail/pass

2017-06-16 Thread Platform Team regression test user
This run is configured for baseline tests only.

flight 71581 qemu-mainline real [real]
http://osstest.xs.citrite.net/~osstest/testlogs/logs/71581/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemuu-winxpsp3 17 guest-start/win.repeat fail REGR. vs. 71572

Regressions which are regarded as allowable (not blocking):
 build-armhf-libvirt   5 libvirt-build fail   like 71572
 build-amd64-libvirt   5 libvirt-build fail   like 71572
 build-i386-libvirt 5 libvirt-build fail   like 71572
 test-amd64-amd64-qemuu-nested-intel 16 debian-hvm-install/l1/l2 fail like 71572
 test-amd64-i386-xl-qemuu-winxpsp3 17 guest-start/win.repeat fail like 71572
 test-amd64-i386-xl-qemuu-win7-amd64 15 guest-localmigrate/x10  fail like 71572
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail like 71572
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1  9 windows-install fail like 71572

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl   1 build-check(1)   blocked  n/a
 build-arm64-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-qcow2  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-arm64-arm64-xl-rtds  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 build-arm64   2 hosts-allocate   broken never pass
 build-arm64-pvops 2 hosts-allocate   broken never pass
 build-arm64-xsm   2 hosts-allocate   broken never pass
 build-arm64   3 capture-logs broken never pass
 build-arm64-xsm   3 capture-logs broken never pass
 build-arm64-pvops 3 capture-logs broken never pass
 test-armhf-armhf-xl  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail  never pass
 test-armhf-armhf-xl-midway   12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-midway   13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-check fail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-check fail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass

version targeted for testing:
 qemuu edf8bc98424d62035d5e4c0f39542722d72d7979
baseline version:
 qemuu 3f0602927b120a480b35dcf58cf6f95435b3ae91

Last test of basis   71572  2017-06-16 03:47:02 Z 1 days
Testing same since   71581  2017-06-16 22:46:18 Z 0 days 1 attempts


People who touched revisions under test:
  David Hildenbrand 
  Juan Quintela 
  Peter Maydell 
  Peter Xu 
  Richard Henderson 

jobs:
 build-amd64-xsm   

[Xen-devel] [linux-next test] 110485: regressions - FAIL

2017-06-16 Thread osstest service owner
flight 110485 linux-next real [real]
http://logs.test-lab.xenproject.org/osstest/logs/110485/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-pair 20 guest-start/debian   fail REGR. vs. 110464
 test-amd64-amd64-libvirt-pair 20 guest-start/debian  fail REGR. vs. 110464
 test-amd64-i386-libvirt-pair 20 guest-start/debian   fail REGR. vs. 110464
 test-amd64-i386-pair 20 guest-start/debian   fail REGR. vs. 110464
 test-armhf-armhf-xl-vhd   4 host-ping-check-native   fail REGR. vs. 110464

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-qemut-win7-amd64 17 guest-start/win.repeat fail blocked in 110464
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail  like 110464
 test-armhf-armhf-libvirt 13 saverestore-support-check fail  like 110464
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-check fail  like 110464
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail like 110464
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 110464
 test-armhf-armhf-libvirt-raw 12 saverestore-support-check fail  like 110464
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 110464
 test-amd64-amd64-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 test-amd64-amd64-xl-qemut-ws16-amd64  9 windows-install fail never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt  12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-xsm  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-xsm  13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl  13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl-credit2  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-credit2  13 saverestore-support-check fail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail  never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail  never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-check fail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-check fail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-check fail never pass
 test-armhf-armhf-xl  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-check fail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-check fail   never pass
 test-amd64-i386-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-i386-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 test-amd64-i386-xl-qemut-win10-i386  9 windows-install fail never pass
 test-amd64-i386-xl-qemut-ws16-amd64  9 windows-install fail never pass
 test-amd64-amd64-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-amd64-xl-qemut-win10-i386  9 windows-install fail never pass

version targeted for testing:
 linux 82341321f25cf732ae136db95f916ed9f22dc68e
baseline version:
 linux a090bd4ff8387c409732a8e059fbf264ea0bdd56

Last test of basis  (not found)
Failing since   (not found)
Testing same since   110485  2017-06-16 09:45:18 Z 0 days 1 attempts

jobs:
 

[Xen-devel] [xen-unstable test] 110484: regressions - FAIL

2017-06-16 Thread osstest service owner
flight 110484 xen-unstable real [real]
http://logs.test-lab.xenproject.org/osstest/logs/110484/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-i386-xl-qemut-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110465
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110465
 test-amd64-i386-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110465
 build-armhf-pvops 5 kernel-build fail REGR. vs. 110465

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-armhf-armhf-examine  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-vhd   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-credit2   1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-cubietruck  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-armhf-armhf-xl-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-xl-qemut-win7-amd64 15 guest-localmigrate/x10 fail like 110465
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 110465
 build-amd64-prev  6 xen-build/dist-test  fail   never pass
 test-amd64-amd64-xl-qemut-ws16-amd64  9 windows-install fail never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt  12 migrate-support-check fail   never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 build-i386-prev   6 xen-build/dist-test  fail   never pass
 test-arm64-arm64-xl-credit2  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl  13 saverestore-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl-xsm  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-credit2  13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl-xsm  13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-check fail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-amd64-amd64-libvirt 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-amd64-i386-xl-qemut-win10-i386  9 windows-install fail never pass
 test-amd64-i386-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-amd64-xl-qemut-win10-i386  9 windows-install fail never pass
 test-amd64-amd64-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-i386-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 test-amd64-i386-xl-qemut-ws16-amd64  9 windows-install fail never pass

version targeted for testing:
 xen  534ecddd8a961a44356fcab576bd68d6900bfa74
baseline version:
 xen  695bb5f504ab48c1d546446f104c1b6c0ead126d

Last test of basis   110465  2017-06-15 09:46:33 Z 1 days
Testing same since   110484  2017-06-16 09:32:22 Z 0 days 1 attempts


People who touched revisions under test:
  Julien Grall 
  Tamas K Lengyel 

jobs:
 build-amd64-xsm  pass
 build-arm64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64-xtf  pass
 build-amd64  pass
 build-arm64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-arm64-libvirt   

Re: [Xen-devel] Notes on stubdoms and latency on ARM

2017-06-16 Thread Volodymyr Babchuk
Hello Julien,
>> The polling can be minimized if you block the vCPU when there is
>> nothing to do. It would get unblocked when you have to schedule it
>> because of a request.
> Thinking a bit more about this. So far, we rely on the domain to use the
> vGIC interrupt controller, which requires the context switch.
>
> We could also implement a dummy interrupt controller to handle a predefined,
> limited number of interrupts, which would allow asynchronous support in
> stubdoms and an interface to support upcalls via the interrupt exception
> vector.
>
> This is something that would be more tricky to do with an EL0 app, as there
> is no EL0 exception vector.
>
Actually, your idea about blocking the vCPU is very interesting. Then we
don't need a vGIC at all. For example, when the stubdomain has finished
handling a request, it can issue a hypercall: "block me until there are
new requests". Xen blocks the vCPU at that moment and unblocks it only
when another request is ready. This is a very promising idea. I need to
think about it further.
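
A minimal sketch of that service loop from the stubdom side (the hypercall
name and the request structure are hypothetical; nothing like this exists
in Xen today):

/* Hypothetical stubdom service loop built on a "block until request"
 * hypercall; no vGIC and no virtual interrupts are involved. */
struct service_request req;            /* assumed shared with Xen */

for ( ;; )
{
    /* Block this vCPU until Xen has queued a new request. */
    HYPERVISOR_block_until_request(&req);

    handle_request(&req);              /* e.g. emulate an MMIO access */
    complete_request(&req);            /* publish the results to Xen */
}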

-- 
WBR Volodymyr Babchuk aka lorc [+380976646013]
mailto: vlad.babc...@gmail.com

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [linux-4.1 baseline-only test] 71580: regressions - trouble: blocked/broken/fail/pass

2017-06-16 Thread Platform Team regression test user
This run is configured for baseline tests only.

flight 71580 linux-4.1 real [real]
http://osstest.xs.citrite.net/~osstest/testlogs/logs/71580/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemut-stubdom-debianhvm-amd64-xsm 15 guest-localmigrate/x10 fail REGR. vs. 71509
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 71509
 test-amd64-i386-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 71509
 test-amd64-i386-xl-qemut-winxpsp3-vcpus1 17 guest-start/win.repeat fail REGR. vs. 71509

Regressions which are regarded as allowable (not blocking):
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop fail REGR. vs. 71509
 build-amd64-libvirt   5 libvirt-build fail   like 71509
 build-armhf-libvirt   5 libvirt-build fail   like 71509
 build-i386-libvirt 5 libvirt-build fail   like 71509
 test-amd64-amd64-qemuu-nested-intel 16 debian-hvm-install/l1/l2 fail like 71509

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-libvirt-vhd  1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl   1 build-check(1)   blocked  n/a
 build-arm64-libvirt   1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt-qcow2  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-pair  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked  n/a
 test-arm64-arm64-libvirt  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-credit2   1 build-check(1)   blocked  n/a
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked n/a
 test-arm64-arm64-xl-rtds  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-multivcpu  1 build-check(1)   blocked  n/a
 test-armhf-armhf-libvirt-xsm  1 build-check(1)   blocked  n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked  n/a
 test-arm64-arm64-xl-xsm   1 build-check(1)   blocked  n/a
 build-arm64   2 hosts-allocate   broken never pass
 build-arm64-pvops 2 hosts-allocate   broken never pass
 build-arm64-xsm   2 hosts-allocate   broken never pass
 build-arm64-xsm   3 capture-logs broken never pass
 build-arm64   3 capture-logs broken never pass
 build-arm64-pvops 3 capture-logs broken never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail  never pass
 test-armhf-armhf-xl-midway   12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-midway   13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-check fail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-check fail   never pass
 test-amd64-amd64-xl-qemut-win7-amd64 17 guest-start/win.repeat fail never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass

version targeted for testing:
 linux 4bd4cfc5210ef2f9002e54a16334a56acd295e4b
baseline version:
 linux 56d847e3ef9433d7ac92376e4ba49d3cf3cb70d2

Last test of basis   71509  2017-06-05 06:52:37 Z   11 days
Testing same since   71580  2017-06-16 17:47:33 Z 0 days 1 attempts


People who touched revisions under test:
  "Eric W. Biederman" 
  Adrian Hunter 
  Al Stone 

Re: [Xen-devel] Notes on stubdoms and latency on ARM

2017-06-16 Thread Volodymyr Babchuk
Hello George,

On 31 May 2017 at 20:02, George Dunlap  wrote:
>>> There is no way out: if the stubdom needs events, then we'll have to
>>> expose and context switch the vGIC. If it doesn't, then we can skip the
>>> vGIC. However, we would have a similar problem with EL0 apps: I am
>>> assuming that EL0 apps don't need to handle interrupts, but if they do,
>>> then they might need something like a vGIC.
>> Hm. Correct me, but if we want to make a stubdom handle some requests
>> (e.g. emulate MMIO accesses), then it needs events, and thus it needs
>> interrupts. At least, I'm not aware of any other mechanism that
>> allows the hypervisor to signal a domain.
>> On the other hand, an EL0 app (as I see them) does not need such events.
>> Basically, you just call the function `handle_mmio()` right in the app.
>> So, apps can live without interrupts and still be able to handle
>> requests.
>
> So remember that "interrupt" and "event" are basically the same as
> "structured callback".  When anything happens that Xen wants to tell the
> EL0 app about, it has to have a way of telling it.  If the EL0 app is
> handling a device, it has to have some way of getting interrupts from
> that device; if it needs to emulate devices sent to the guest, it needs
> some way to tell Xen to deliver an interrupt to the guest.
Basically yes. There should be a mechanism to request something from a
native application. The question is how this mechanism can be implemented.
The classical approach is an event-driven loop:

while (1) {
    wait_for_event();
    handle_event();
    return_back_results();
}

wait_for_event() can be anything from a WFI instruction to read() on a
socket. This is how stubdoms work. I agree with you: there is no sense
in repeating this in native apps.

> Now, we could make the EL0 app interface "interruptless".  Xen could
> write information about pending events in a shared memory region, and
> the EL0 app could check that before calling some sort of block()
> hypercall, and check it again when it returns from the block() call.

> But the shared event information starts to look an awful lot like events
> and/or pending bits on an interrupt controller -- the only difference
> being that you aren't interrupted if you're already running.
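
As a sketch, such an interruptless interface could look roughly like this
(all names here are hypothetical):

/* Hypothetical: Xen posts pending-event bits in a shared page; the EL0
 * app re-checks them around a block() hypercall instead of taking
 * interrupts. */
volatile uint64_t *pending;            /* page shared with Xen */

for ( ;; )
{
    while ( *pending == 0 )
        hyp_block();                   /* returns once Xen posts new bits */

    process_events(__atomic_exchange_n(pending, 0, __ATOMIC_ACQ_REL));
}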

Actually, there is a third way, which I have used. I described it in the
original email (see [1]).
Basically, the native application is inactive until it is needed by the
hypervisor. When the hypervisor wants some service from the app, it sets up
the parameters, switches the mode to EL0 and jumps to the app entry point.
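
A minimal sketch of that synchronous model, seen from the hypervisor side
(all names are hypothetical; no such EL0-app interface exists in Xen):

/* Hypothetical: run a native EL0 app synchronously, with no events or
 * interrupts delivered to it. */
static int run_el0_app(struct el0_app *app, struct el0_args *args)
{
    el0_prepare_context(app, args);    /* entry point, stack, arguments */
    switch_to_el0(app);                /* returns when the app traps back
                                          into the hypervisor, e.g. via SVC */
    return app->result;                /* e.g. the outcome of handle_mmio() */
}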
> I'm pretty sure you could run in this mode using the existing interfaces
> if you didn't want the hassle of dealing with asynchrony.  If that's the
> case, then why bother inventing an entirely new interface, with its own
> bugs and duplication of functionality?  Why not just use what we already
> have?
Because we are concerned about latency. In my benchmark, my native-app
PoC is 1.6 times faster than a stubdom.


[1] http://marc.info/?l=xen-devel&m=149151018801649&w=2


-- 
WBR Volodymyr Babchuk aka lorc [+380976646013]
mailto: vlad.babc...@gmail.com

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [qemu-mainline test] 110478: tolerable FAIL - PUSHED

2017-06-16 Thread osstest service owner
flight 110478 qemu-mainline real [real]
http://logs.test-lab.xenproject.org/osstest/logs/110478/

Failures :-/ but no regressions.

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-check fail  like 110458
 test-armhf-armhf-libvirt 13 saverestore-support-check fail  like 110458
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 110458
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail like 110458
 test-armhf-armhf-libvirt-raw 12 saverestore-support-check fail  like 110458
 test-amd64-i386-libvirt-xsm  12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 test-arm64-arm64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-arm64-arm64-xl-credit2  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-credit2  13 saverestore-support-check fail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-arm64-arm64-xl-xsm  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-xsm  13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-check fail   never pass
 test-armhf-armhf-xl  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-check fail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail  never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-check fail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-check fail never pass
 test-armhf-armhf-libvirt 12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt  12 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-check fail   never pass
 test-arm64-arm64-xl  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-check fail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-check fail   never pass
 test-amd64-i386-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-amd64-xl-qemuu-win10-i386  9 windows-install fail never pass
 test-amd64-i386-xl-qemuu-ws16-amd64  9 windows-install fail never pass

version targeted for testing:
 qemuu edf8bc98424d62035d5e4c0f39542722d72d7979
baseline version:
 qemuu 3f0602927b120a480b35dcf58cf6f95435b3ae91

Last test of basis   110458  2017-06-15 02:35:43 Z 1 days
Testing same since   110478  2017-06-16 03:24:46 Z 0 days 1 attempts


People who touched revisions under test:
  David Hildenbrand 
  Juan Quintela 
  Peter Maydell 
  Peter Xu 
  Richard Henderson 

jobs:
 build-amd64-xsm  pass
 build-arm64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-arm64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 

Re: [Xen-devel] [PATCH 1/2] arm: smccc: handle SMCs/HVCs according to SMCCC

2017-06-16 Thread Stefano Stabellini
On Wed, 14 Jun 2017, Volodymyr Babchuk wrote:
> SMCCC (SMC Calling Convention) describes how to handle both HVCs and SMCs.
> SMCCC states that both HVC and SMC are valid conduits for calls to
> different firmware functions. Thus, for example, PSCI calls can be made
> by either SMC or HVC. SMCCC also defines the function number coding for
> such calls. Besides functional calls there are query calls, which allow
> the underlying OS to determine the version, UID and number of functions
> provided by a service provider.
> 
> This patch adds a new file, `smccc.c`, which handles both generic SMCs
> and HVCs according to SMCCC. At the moment it implements only one
> service: the Standard Hypervisor Service.
> 
> The Standard Hypervisor Service only supports query calls, so a caller
> can ask for the hypervisor UID and determine that it is Xen running.
> 
> This change allows more generic handling for SMCs and HVCs and it can
> be easily extended to support new services and functions.
> 
> Signed-off-by: Volodymyr Babchuk 
> Reviewed-by: Oleksandr Andrushchenko 
> Reviewed-by: Oleksandr Tyshchenko 
> ---
>  xen/arch/arm/Makefile   |  1 +
>  xen/arch/arm/smccc.c| 96 +
>  xen/arch/arm/traps.c| 10 -
>  xen/include/asm-arm/smccc.h | 89 +
>  4 files changed, 194 insertions(+), 2 deletions(-)
>  create mode 100644 xen/arch/arm/smccc.c
>  create mode 100644 xen/include/asm-arm/smccc.h
> 
> diff --git a/xen/arch/arm/Makefile b/xen/arch/arm/Makefile
> index 49e1fb2..b8728cf 100644
> --- a/xen/arch/arm/Makefile
> +++ b/xen/arch/arm/Makefile
> @@ -39,6 +39,7 @@ obj-y += psci.o
>  obj-y += setup.o
>  obj-y += shutdown.o
>  obj-y += smc.o
> +obj-y += smccc.o
>  obj-y += smp.o
>  obj-y += smpboot.o
>  obj-y += sysctl.o
> diff --git a/xen/arch/arm/smccc.c b/xen/arch/arm/smccc.c
> new file mode 100644
> index 000..5d10964
> --- /dev/null
> +++ b/xen/arch/arm/smccc.c
> @@ -0,0 +1,96 @@
> +/*
> + * xen/arch/arm/smccc.c
> + *
> + * Generic handler for SMC and HVC calls according to
> + * ARM SMC calling convention
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + */
> +
> +
> +#include 
> +#include 
> +#include 
> +/* Need to include xen/sched.h before asm/domain.h or it breaks build*/
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#define XEN_SMCCC_UID ARM_SMCCC_UID(0xa71812dc, 0xc698, 0x4369, \
> +0x9a, 0xcf, 0x79, 0xd1, \
> +0x8d, 0xde, 0xe6, 0x67)
> +
> +/*
> + * We can't use XEN version here:
> + * Major revision should change every time SMC/HVC function is removed.
> + * Minor revision should change every time SMC/HVC function is added.
> + * So, it is SMCCC protocol revision code, not XEN version
> + */
> +#define XEN_SMCCC_MAJOR_REVISION 0
> +#define XEN_SMCCC_MINOR_REVISION 1
> +#define XEN_SMCCC_FUNCTION_COUNT 3

Both ARM_SMCCC_UID and XEN_SMCCC_MAJOR/MINOR_REVISION become part of the
Xen public ABI. Please explain in the commit message why you chose them
as identifiers, and add them to a separate new header file under
xen/include/public/arch-arm/ (because they are public).



> +/* SMCCC interface for hypervisor. Tell about self */
> +static bool handle_hypervisor(struct cpu_user_regs *regs, const union hsr hsr)
> +{
> +switch ( ARM_SMCCC_FUNC_NUM(get_user_reg(regs, 0)) )
> +{
> +case ARM_SMCCC_FUNC_CALL_COUNT:
> +set_user_reg(regs, 0, XEN_SMCCC_FUNCTION_COUNT);
> +return true;
> +case ARM_SMCCC_FUNC_CALL_UID:
> +set_user_reg(regs, 0, XEN_SMCCC_UID.a[0]);
> +set_user_reg(regs, 1, XEN_SMCCC_UID.a[1]);
> +set_user_reg(regs, 2, XEN_SMCCC_UID.a[2]);
> +set_user_reg(regs, 3, XEN_SMCCC_UID.a[3]);
> +return true;
> +case ARM_SMCCC_FUNC_CALL_REVISION:
> +set_user_reg(regs, 0, XEN_SMCCC_MAJOR_REVISION);
> +set_user_reg(regs, 1, XEN_SMCCC_MINOR_REVISION);
> +return true;
> +}
> +return false;
> +}
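
For reference, a guest could probe this interface roughly as follows. This
is only a sketch: the function ID 0x8500ff01 is derived from the SMCCC spec
(fast 32-bit call, service owner 5 = standard hypervisor service, query
function 0xFF01 = CALL_UID), not taken from this patch.

/* Hypothetical arm64 guest-side probe of the hypervisor UID via HVC. */
#include <stdint.h>

static void probe_hyp_uid(uint32_t uid[4])
{
    register uint64_t x0 asm("x0") = 0x8500ff01;  /* CALL_UID */
    register uint64_t x1 asm("x1");
    register uint64_t x2 asm("x2");
    register uint64_t x3 asm("x3");

    /* Per SMCCC, arguments go in x0-x7 and results come back in x0-x3. */
    asm volatile ("hvc #0"
                  : "+r" (x0), "=r" (x1), "=r" (x2), "=r" (x3)
                  :
                  : "memory");

    uid[0] = x0; uid[1] = x1; uid[2] = x2; uid[3] = x3;
}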

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 03/24] xen/arm: setup: Remove bogus xenheap_mfn_end in setup_mm for arm64

2017-06-16 Thread Stefano Stabellini
On Fri, 16 Jun 2017, Julien Grall wrote:
> Hi Stefano,
> 
> On 06/16/2017 06:33 PM, Stefano Stabellini wrote:
> > On Fri, 16 Jun 2017, Julien Grall wrote:
> > > Hi Stefano,
> > > 
> > > On 15/06/2017 23:28, Stefano Stabellini wrote:
> > > > On Tue, 13 Jun 2017, Julien Grall wrote:
> > > > > xenheap_mfn_end is storing an MFN and not a physical address.
> > > > > Thankfully
> > > > > xenheap_mfn_end is not used in the arm64 code. So drop it.
> > > > 
> > > > That's fine, but in that case I would prefer to move the definition of
> > > > xenheap_mfn_end under #ifdef CONFIG_ARM_32. In fact, there is another
> > > > assignment of xenheap_mfn_end few lines below in the arm64 version of
> > > > setup_mm: don't we need to remove that too?
> > > 
> > > The other xenheap_mfn_end contains a valid MFN that points to the end,
> > > and I didn't want to #ifdef it because:
> > >   1) It complexifies the code
> > >   2) All regions should be bounded with start/end to simplify
> > > potential use.
> > 
> > I am only suggesting to move its definition and declaration under #ifdef
> > CONFIG_ARM_32 in xen/include/asm-arm/mm.h and xen/arch/arm/mm.c.
> > 
> > After that, all users of xenheap_mfn_end are already #ifdef
> > CONFIG_ARM_32, except for xen/arch/arm/setup.c:setup_mm. The setup_mm
> > under #ifdef CONFIG_ARM_32 will be fine. The setup_mm under
> > #ifdef CONFIG_ARM_64, doesn't need xenheap_mfn_end and we could just
> > remove it from there.
> > 
> > Does it make sense? Am I missing something?
> 
> To be honest, I really want to limit the ifdefery in the mm code. This is a
> bit complex to follow. One of my side projects is to look at that.
>
> Also, even if xenheap_mfn_end today is not used, I think the current value is
> valid and could be helpful to have in hand. For instance, it does not seem
> justified to have different implementations of at least is_xen_heap_page for
> arm32 and arm64.
> 
> So I am not in favor of dropping xenheap_mfn_end at the moment.

All right, then if we are going to keep xenheap_mfn_end around on arm64,
please update the commit message of this patch because it is confusing. 
It is just this one instance of xenheap_mfn_end in setup_mm which is
superfluous on arm64 because we are setting it again later.

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 0/2] xen/arm: Move LPAE definition in a separate header.

2017-06-16 Thread Stefano Stabellini
Please append these two patches at the end of the remaining set of the
"xen/arm: Extend the usage of typesafe MFN" series, when you repost.


On Thu, 15 Jun 2017, Julien Grall wrote:
> This small patch series moves the LPAE definitions out of page.h. It is
> based on my series "xen/arm: Extend usage of typesafe MFN" [1], due to a small
> conflict with patch #5.
> 
> Cheers,
> 
> [1] https://lists.xen.org/archives/html/xen-devel/2017-06/msg01361.html
> 
> Julien Grall (2):
>   xen/arm: Move LPAE definition in a separate header
>   xen/arm: lpae: Fix comments coding style
> 
>  xen/include/asm-arm/lpae.h | 184 +
>  xen/include/asm-arm/page.h | 152 +
>  2 files changed, 185 insertions(+), 151 deletions(-)
>  create mode 100644 xen/include/asm-arm/lpae.h
> 
> -- 
> 2.11.0
> 

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 2/2] xen/arm: lpae: Fix comments coding style

2017-06-16 Thread Stefano Stabellini
On Thu, 15 Jun 2017, Julien Grall wrote:
> Also adding one missing full stop.
> 
> Signed-off-by: Julien Grall 

Reviewed-by: Stefano Stabellini 

>  xen/include/asm-arm/lpae.h | 45 ++---
>  1 file changed, 30 insertions(+), 15 deletions(-)
> 
> diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
> index 1e6a68926e..6244240ca0 100644
> --- a/xen/include/asm-arm/lpae.h
> +++ b/xen/include/asm-arm/lpae.h
> @@ -3,10 +3,12 @@
>  
>  #ifndef __ASSEMBLY__
>  
> -/* WARNING!  Unlike the Intel pagetable code, where l1 is the lowest
> +/*
> + * WARNING!  Unlike the Intel pagetable code, where l1 is the lowest
>   * level and l4 is the root of the trie, the ARM pagetables follow ARM's
>   * documentation: the levels are called first, second  in the order
> - * that the MMU walks them (i.e. "first" is the root of the trie). */
> + * that the MMU walks them (i.e. "first" is the root of the trie).
> + */
>  
>  
> /**
>   * ARMv7-A LPAE pagetables: 3-level trie, mapping 40-bit input to
> @@ -17,15 +19,18 @@
>   * different place from those in leaf nodes seems to be to allow linear
>   * pagetable tricks.  If we're not doing that then the set of permission
>   * bits that's not in use in a given node type can be used as
> - * extra software-defined bits. */
> + * extra software-defined bits.
> + */
>  
>  typedef struct __packed {
>  /* These are used in all kinds of entry. */
>  unsigned long valid:1;  /* Valid mapping */
>  unsigned long table:1;  /* == 1 in 4k map entries too */
>  
> -/* These ten bits are only used in Block entries and are ignored
> - * in Table entries. */
> +/*
> + * These ten bits are only used in Block entries and are ignored
> + * in Table entries.
> + */
>  unsigned long ai:3; /* Attribute Index */
>  unsigned long ns:1; /* Not-Secure */
>  unsigned long user:1;   /* User-visible */
> @@ -38,30 +43,38 @@ typedef struct __packed {
>  unsigned long long base:36; /* Base address of block or next table */
>  unsigned long sbz:4;/* Must be zero */
>  
> -/* These seven bits are only used in Block entries and are ignored
> - * in Table entries. */
> +/*
> + * These seven bits are only used in Block entries and are ignored
> + * in Table entries.
> + */
>  unsigned long contig:1; /* In a block of 16 contiguous entries */
>  unsigned long pxn:1;/* Privileged-XN */
>  unsigned long xn:1; /* eXecute-Never */
>  unsigned long avail:4;  /* Ignored by hardware */
>  
> -/* These 5 bits are only used in Table entries and are ignored in
> - * Block entries */
> +/*
> + * These 5 bits are only used in Table entries and are ignored in
> + * Block entries.
> + */
>  unsigned long pxnt:1;   /* Privileged-XN */
>  unsigned long xnt:1;/* eXecute-Never */
>  unsigned long apt:2;/* Access Permissions */
>  unsigned long nst:1;/* Not-Secure */
>  } lpae_pt_t;
>  
> -/* The p2m tables have almost the same layout, but some of the permission
> - * and cache-control bits are laid out differently (or missing) */
> +/*
> + * The p2m tables have almost the same layout, but some of the permission
> + * and cache-control bits are laid out differently (or missing).
> + */
>  typedef struct __packed {
>  /* These are used in all kinds of entry. */
>  unsigned long valid:1;  /* Valid mapping */
>  unsigned long table:1;  /* == 1 in 4k map entries too */
>  
> -/* These ten bits are only used in Block entries and are ignored
> - * in Table entries. */
> +/*
> + * These ten bits are only used in Block entries and are ignored
> + * in Table entries.
> + */
>  unsigned long mattr:4;  /* Memory Attributes */
>  unsigned long read:1;   /* Read access */
>  unsigned long write:1;  /* Write access */
> @@ -73,8 +86,10 @@ typedef struct __packed {
>  unsigned long long base:36; /* Base address of block or next table */
>  unsigned long sbz3:4;
>  
> -/* These seven bits are only used in Block entries and are ignored
> - * in Table entries. */
> +/*
> + * These seven bits are only used in Block entries and are ignored
> + * in Table entries.
> + */
>  unsigned long contig:1; /* In a block of 16 contiguous entries */
>  unsigned long sbz2:1;
>  unsigned long xn:1; /* eXecute-Never */
> -- 
> 2.11.0
> 

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 1/2] xen/arm: Move LPAE definition in a separate header

2017-06-16 Thread Stefano Stabellini
On Thu, 15 Jun 2017, Julien Grall wrote:
> page.h is getting bigger. Move all the LPAE definitions out into a separate
> header. There are no functional changes.
> 
> Signed-off-by: Julien Grall 
> ---
>  xen/include/asm-arm/lpae.h | 169 +
>  xen/include/asm-arm/page.h | 152 +---
>  2 files changed, 170 insertions(+), 151 deletions(-)
>  create mode 100644 xen/include/asm-arm/lpae.h
> 
> diff --git a/xen/include/asm-arm/lpae.h b/xen/include/asm-arm/lpae.h
> new file mode 100644
> index 00..1e6a68926e
> --- /dev/null
> +++ b/xen/include/asm-arm/lpae.h
> @@ -0,0 +1,169 @@
> +#ifndef __ARM_LPAE_H__
> +#define __ARM_LPAE_H__
> +
> +#ifndef __ASSEMBLY__
> +
> +/* WARNING!  Unlike the Intel pagetable code, where l1 is the lowest
> + * level and l4 is the root of the trie, the ARM pagetables follow ARM's
> + * documentation: the levels are called first, second  in the order
> + * that the MMU walks them (i.e. "first" is the root of the trie). */
> +
> +/**
> + * ARMv7-A LPAE pagetables: 3-level trie, mapping 40-bit input to
> + * 40-bit output addresses.  Tables at all levels have 512 64-bit entries
> + * (i.e. are 4Kb long).
> + *
> + * The bit-shuffling that has the permission bits in branch nodes in a
> + * different place from those in leaf nodes seems to be to allow linear
> + * pagetable tricks.  If we're not doing that then the set of permission
> + * bits that's not in use in a given node type can be used as
> + * extra software-defined bits. */
> +
> +typedef struct __packed {
> +/* These are used in all kinds of entry. */
> +unsigned long valid:1;  /* Valid mapping */
> +unsigned long table:1;  /* == 1 in 4k map entries too */
> +
> +/* These ten bits are only used in Block entries and are ignored
> + * in Table entries. */
> +unsigned long ai:3; /* Attribute Index */
> +unsigned long ns:1; /* Not-Secure */
> +unsigned long user:1;   /* User-visible */
> +unsigned long ro:1; /* Read-Only */
> +unsigned long sh:2; /* Shareability */
> +unsigned long af:1; /* Access Flag */
> +unsigned long ng:1; /* Not-Global */
> +
> +/* The base address must be appropriately aligned for Block entries */
> +unsigned long long base:36; /* Base address of block or next table */
> +unsigned long sbz:4;/* Must be zero */
> +
> +/* These seven bits are only used in Block entries and are ignored
> + * in Table entries. */
> +unsigned long contig:1; /* In a block of 16 contiguous entries */
> +unsigned long pxn:1;/* Privileged-XN */
> +unsigned long xn:1; /* eXecute-Never */
> +unsigned long avail:4;  /* Ignored by hardware */
> +
> +/* These 5 bits are only used in Table entries and are ignored in
> + * Block entries */
> +unsigned long pxnt:1;   /* Privileged-XN */
> +unsigned long xnt:1;/* eXecute-Never */
> +unsigned long apt:2;/* Access Permissions */
> +unsigned long nst:1;/* Not-Secure */
> +} lpae_pt_t;
> +
> +/* The p2m tables have almost the same layout, but some of the permission
> + * and cache-control bits are laid out differently (or missing) */
> +typedef struct __packed {
> +/* These are used in all kinds of entry. */
> +unsigned long valid:1;  /* Valid mapping */
> +unsigned long table:1;  /* == 1 in 4k map entries too */
> +
> +/* These ten bits are only used in Block entries and are ignored
> + * in Table entries. */
> +unsigned long mattr:4;  /* Memory Attributes */
> +unsigned long read:1;   /* Read access */
> +unsigned long write:1;  /* Write access */
> +unsigned long sh:2; /* Shareability */
> +unsigned long af:1; /* Access Flag */
> +unsigned long sbz4:1;
> +
> +/* The base address must be appropriately aligned for Block entries */
> +unsigned long long base:36; /* Base address of block or next table */
> +unsigned long sbz3:4;
> +
> +/* These seven bits are only used in Block entries and are ignored
> + * in Table entries. */
> +unsigned long contig:1; /* In a block of 16 contiguous entries */
> +unsigned long sbz2:1;
> +unsigned long xn:1; /* eXecute-Never */
> +unsigned long type:4;   /* Ignore by hardware. Used to store p2m 
> types */
> +
> +unsigned long sbz1:5;
> +} lpae_p2m_t;
> +
> +/* Permission mask: xn, write, read */
> +#define P2M_PERM_MASK (0x004000C0ULL)
> +#define P2M_CLEAR_PERM(pte) ((pte).bits & ~P2M_PERM_MASK)
> +
> +/*
> + * Walk is the common bits of p2m and pt entries which are needed to
> + * simply walk the table (e.g. for debug).
> + */
> +typedef struct __packed {
> +/* These are used in all kinds of entry. */
> +

[Xen-devel] [PATCH] docs: improve ARM passthrough doc

2017-06-16 Thread Stefano Stabellini
Add a warning: use passthrough with care.

Add a pointer to the gic device tree bindings. Add an explanation on how
to calculate irq numbers from device tree.

Add a brief explanation of the reg property and a pointer to the xl docs
for a description of the iomem property. Add a note that in the example
we are using different memory addresses for guests and host.

Signed-off-by: Stefano Stabellini 

diff --git a/docs/misc/arm/passthrough.txt b/docs/misc/arm/passthrough.txt
index 082e9ab..7140a61 100644
--- a/docs/misc/arm/passthrough.txt
+++ b/docs/misc/arm/passthrough.txt
@@ -12,7 +12,11 @@ property "xen,passthrough". The command to do it in U-Boot is:
 2) Create a partial device tree describing the device. The IRQ are mapped
 1:1 to the guest (i.e VIRQ == IRQ). For MMIO, you will have to find a hole
 in the guest memory layout (see xen/include/public/arch-arm.h, note that
-the layout is not stable and can change between versions of Xen).
+the layout is not stable and can change between versions of Xen). Please
+be aware that passing a partial device tree to a VM is a powerful tool;
+use it with care. In production, only allow assignment of devices which
+have been previously tested and known to work correctly when given to
+guests. 
 
 /dts-v1/;
 
@@ -48,6 +52,8 @@ Note:
 - #size-cells
 * See http://www.devicetree.org/Device_Tree_Usage for more
 information about device tree.
+* In this example, the device MMIO region is placed at a different
+address (0x1000) compared to the host address (0xfff51000)
 
 3) Compile the partial guest device with dtc (Device Tree Compiler).
 For our purpose, the compiled file will be called guest-midway.dtb and
@@ -60,3 +66,16 @@ dtdev = [ "/soc/ethernet@fff51000" ]
 irqs = [ 112, 113, 114 ]
 iomem = [ "0xfff51,1@0x1" ]
 
+Please refer to your platform docs for the MMIO ranges and interrupts.
+
+They can also be calculated from the original device tree (not
+recommended). See [arm,gic.txt] in the Linux repository for a
+description of the "interrupts" property format. For the GIC, the first
+cell is interrupt type, and the second cell is the interrupt number.
+Given that SPI numbers start from 32, in this example 80 + 32 = 112. 
+
+See man [xl.cfg] for the iomem format. The reg property is just a pair
+of address and size numbers, each of which can occupy 1 or 2 cells.
+
+[arm,gic.txt]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
+[xl.cfg]: https://xenbits.xen.org/docs/unstable/man/xl.cfg.5.html
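
To make the IRQ arithmetic concrete, a host device node along these lines
(the reg and interrupts values are illustrative, chosen to match the example
above):

ethernet@fff51000 {
        reg = <0xfff51000 0x1000>;
        /* GIC binding: <type number flags>; type 0 = SPI */
        interrupts = <0 80 4>, <0 81 4>, <0 82 4>;
};

would correspond to irqs = [ 112, 113, 114 ] in the xl config, since each
SPI number is offset by 32 (80 + 32 = 112, and so on).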

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH] xen: idle_loop: either deal with tasklets or go idle

2017-06-16 Thread Dario Faggioli
On Fri, 2017-06-16 at 10:41 -0700, Stefano Stabellini wrote:
> On Fri, 16 Jun 2017, Dario Faggioli wrote:
> > 
> > diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> > index 76310ed..86cd612 100644
> > --- a/xen/arch/arm/domain.c
> > +++ b/xen/arch/arm/domain.c
> > @@ -41,20 +41,28 @@ DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
> >  
> >  void idle_loop(void)
> >  {
> > +unsigned int cpu = smp_processor_id();
> > +
> >  for ( ; ; )
> >  {
> > -if ( cpu_is_offline(smp_processor_id()) )
> > +if ( cpu_is_offline(cpu) )
> >  stop_cpu();
> >  
> > -local_irq_disable();
> > -if ( cpu_is_haltable(smp_processor_id()) )
> > +/* Are we here for running vcpu context tasklets, or for
> > idling? */
> > +if ( cpu_is_haltable(cpu) )
> >  {
> > -dsb(sy);
> > -wfi();
> > +local_irq_disable();
> > +/* We need to check again, with IRQ disabled */
> > +if ( cpu_is_haltable(cpu) )
> > +{
> > +dsb(sy);
> > +wfi();
> > +}
> > +local_irq_enable();
> >  }
> > -local_irq_enable();
> > +else
> > +do_tasklet();
> >  
> > -do_tasklet();
> >  do_softirq();
> 
> Are you sure you want to check that cpu_is_haltable twice? It doesn't
> make sense to me.
>
It's because IRQs are still enabled at the first check: we have to re-check
with IRQs disabled before actually going to sleep, or we could miss a wakeup.

But anyway, discard this patch. I'll go back to (a slightly modified
version of) the first one I sent, which defines a tasklet specific
helper function.

I'll send it on Monday.

Regards,
Dario
-- 
<> (Raistlin Majere)
-
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [xen-4.9-testing test] 110474: regressions - FAIL

2017-06-16 Thread osstest service owner
flight 110474 xen-4.9-testing real [real]
http://logs.test-lab.xenproject.org/osstest/logs/110474/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-qemut-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110417
 test-amd64-i386-xl-qemut-debianhvm-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110417
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail REGR. vs. 110417

Tests which are failing intermittently (not blocking):
 test-amd64-i386-xl-qemut-win7-amd64 15 guest-localmigrate/x10 fail in 110453 pass in 110474
 test-amd64-i386-rumprun-i386 16 rumprun-demo-xenstorels/xenstorels.repeat fail pass in 110453
 test-armhf-armhf-xl-rtds 11 guest-start fail pass in 110453

Tests which did not succeed, but are not blocking:
 test-amd64-i386-xl-qemuu-win7-amd64 17 guest-start/win.repeat fail in 110453 blocked in 110417
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail in 110453 like 110417
 test-armhf-armhf-xl-rtds 12 migrate-support-check fail in 110453 never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail in 110453 never pass
 test-amd64-i386-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail like 110417
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop fail like 110417
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 110417
 build-arm64-xsm   6 xen-build/dist-test  fail   never pass
 build-amd64-xsm   6 xen-build/dist-test  fail   never pass
 build-amd64-prev  6 xen-build/dist-test  fail   never pass
 build-i386 6 xen-build/dist-test  fail   never pass
 build-i386-xsm 6 xen-build/dist-test  fail   never pass
 test-amd64-amd64-livepatch   10 livepatch-run fail   never pass
 build-amd64   6 xen-build/dist-test  fail   never pass
 test-amd64-i386-livepatch 10 livepatch-run fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-amd64-xl-qemuu-ws16-amd64  9 windows-install fail never pass
 test-amd64-amd64-libvirt 12 migrate-support-check fail   never pass
 build-arm64   6 xen-build/dist-test  fail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-xsm  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-xsm  13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl  13 saverestore-support-check fail   never pass
 test-amd64-amd64-xl-qemut-ws16-amd64  9 windows-install fail never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-arm64-arm64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-check fail   never pass
 build-i386-prev   6 xen-build/dist-test  fail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-check fail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 build-armhf-xsm   6 xen-build/dist-test  fail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl  13 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-check fail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-check fail never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail  never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-check fail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-check fail   never pass
 test-arm64-arm64-xl-credit2  12 migrate-support-check fail   never pass
 test-arm64-arm64-xl-credit2  13 saverestore-support-check fail   never pass
 test-amd64-i386-libvirt  12 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-raw 12 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-check fail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-check fail   never pass
 test-armhf-armhf-xl-credit2  12 

Re: [Xen-devel] [PATCH v3 2/2] xen-access: write_ctrlreg_c4 test

2017-06-16 Thread Razvan Cojocaru
On 06/16/2017 10:20 PM, Petre Pircalabu wrote:
> Add test for write_ctrlreg event handling.
> 
> Signed-off-by: Petre Pircalabu 
> ---
>  tools/tests/xen-access/xen-access.c | 53 +-
>  1 file changed, 52 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
> index 238011e..bbf5047 100644
> --- a/tools/tests/xen-access/xen-access.c
> +++ b/tools/tests/xen-access/xen-access.c
> @@ -57,6 +57,13 @@
>  #define X86_TRAP_DEBUG  1
>  #define X86_TRAP_INT3   3
>  
> +/* From xen/include/asm-x86/x86-defns.h */
> +#define X86_CR4_PGE 0x0080 /* enable global pages */
> +
> +#ifndef ARRAY_SIZE
> +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
> +#endif
> +
>  typedef struct vm_event {
>  domid_t domain_id;
>  xenevtchn_handle *xce_handle;
> @@ -314,6 +321,24 @@ static void get_request(vm_event_t *vm_event, vm_event_request_t *req)
>  }
>  
>  /*
> + * X86 control register names
> + */
> +static const char* get_x86_ctrl_reg_name(uint32_t index)
> +{
> +static const char* names[] = {
> +[VM_EVENT_X86_CR0]  = "CR0",
> +[VM_EVENT_X86_CR3]  = "CR3",
> +[VM_EVENT_X86_CR4]  = "CR4",
> +[VM_EVENT_X86_XCR0] = "XCR0",
> +};
> +
> +if ( index > ARRAY_SIZE(names) || names[index] == NULL )

I think this probably wants to be index >= ARRAY_SIZE(names).
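
For reference, the guard with that fix applied would read:

    /* index == ARRAY_SIZE(names) would read one element past the end
     * of the array, hence >= rather than >. */
    if ( index >= ARRAY_SIZE(names) || names[index] == NULL )
        return "UNKNOWN"; /* hypothetical fallback; not shown in the hunk */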


Thanks,
Razvan

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [libvirt test] 110480: regressions - FAIL

2017-06-16 Thread osstest service owner
flight 110480 libvirt real [real]
http://logs.test-lab.xenproject.org/osstest/logs/110480/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-armhf-armhf-libvirt-xsm  6 xen-boot fail REGR. vs. 110460
 test-armhf-armhf-libvirt-raw 14 guest-start/debian.repeat fail REGR. vs. 110460

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-libvirt 13 saverestore-support-check fail  like 110460
 test-armhf-armhf-libvirt-raw 12 saverestore-support-check fail  like 110460
 test-amd64-amd64-libvirt 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-amd64-i386-libvirt  12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-xsm 13 saverestore-support-check fail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check fail never pass
 test-arm64-arm64-libvirt 12 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt 13 saverestore-support-check fail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-check fail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-check fail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-check fail   never pass
 test-arm64-arm64-libvirt-qcow2 11 migrate-support-check fail never pass
 test-arm64-arm64-libvirt-qcow2 12 saverestore-support-check fail never pass

version targeted for testing:
 libvirt  f1acc4130c4d9a15fcf327f02cfc42de7eca52ff
baseline version:
 libvirt  f0a3fe1b0a2996272dd167501bb5de752d9d1956

Last test of basis   110460  2017-06-15 04:31:18 Z 1 days
Testing same since   110480  2017-06-16 04:20:40 Z 0 days 1 attempts


People who touched revisions under test:
  Daniel P. Berrange 

jobs:
 build-amd64-xsm  pass
 build-arm64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-arm64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-arm64-libvirt  pass
 build-armhf-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-pvopspass
 build-arm64-pvopspass
 build-armhf-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm   pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsmpass
 test-amd64-amd64-libvirt-xsm pass
 test-arm64-arm64-libvirt-xsm pass
 test-armhf-armhf-libvirt-xsm fail
 test-amd64-i386-libvirt-xsm  pass
 test-amd64-amd64-libvirt pass
 test-arm64-arm64-libvirt pass
 test-armhf-armhf-libvirt pass
 test-amd64-i386-libvirt  pass
 test-amd64-amd64-libvirt-pairpass
 test-amd64-i386-libvirt-pair pass
 test-arm64-arm64-libvirt-qcow2   pass
 test-armhf-armhf-libvirt-raw fail
 test-amd64-amd64-libvirt-vhd pass



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at

[Xen-devel] [PATCH v3 1/2] x86/monitor: add masking support for write_ctrlreg events

2017-06-16 Thread Petre Pircalabu
Add support for filtering out write_ctrlreg monitor events if they are
generated only by changing certain bits.
A new parameter (bitmask) was added to the xc_monitor_write_ctrlreg
function in order to mask event generation when the changed bits are
set in the mask.
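
For clarity, the resulting delivery condition in hvm_monitor_cr() behaves
like this sketch (illustration only; variable names here are ours, not the
patch's): an event is sent only if none of the masked bits changed value.

    static bool should_deliver(bool enabled, bool onchangeonly,
                               unsigned long value, unsigned long old,
                               uint64_t mask)
    {
        return enabled &&
               (!onchangeonly || value != old) &&
               !((value ^ old) & mask);
    }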

Signed-off-by: Petre Pircalabu 
Acked-by: Tamas K Lengyel 
---
 tools/libxc/include/xenctrl.h | 2 +-
 tools/libxc/xc_monitor.c  | 5 -
 xen/arch/x86/hvm/monitor.c| 3 ++-
 xen/arch/x86/monitor.c| 9 +
 xen/include/asm-x86/domain.h  | 1 +
 xen/include/public/domctl.h   | 8 
 6 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 1629f41..8c26cb4 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1999,7 +1999,7 @@ int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
 uint32_t *capabilities);
 int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
  uint16_t index, bool enable, bool sync,
- bool onchangeonly);
+ uint64_t bitmask, bool onchangeonly);
 /*
  * A list of MSR indices can usually be found in /usr/include/asm/msr-index.h.
  * Please consult the Intel/AMD manuals for more information on
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index f99b6e3..b44ce93 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -70,7 +70,7 @@ int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
 
 int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
  uint16_t index, bool enable, bool sync,
- bool onchangeonly)
+ uint64_t bitmask, bool onchangeonly)
 {
 DECLARE_DOMCTL;
 
@@ -82,6 +82,9 @@ int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
 domctl.u.monitor_op.u.mov_to_cr.index = index;
 domctl.u.monitor_op.u.mov_to_cr.sync = sync;
 domctl.u.monitor_op.u.mov_to_cr.onchangeonly = onchangeonly;
+domctl.u.monitor_op.u.mov_to_cr.bitmask = bitmask;
+domctl.u.monitor_op.u.mov_to_cr.pad1 = 0;
+domctl.u.monitor_op.u.mov_to_cr.pad2 = 0;
 
 return do_domctl(xch, &domctl);
 }
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index bde5fd0..a7ccfc4 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -38,7 +38,8 @@ bool_t hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old
 
 if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
  (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
-  value != old) )
+  value != old) &&
+ (!((value ^ old) & ad->monitor.write_ctrlreg_mask[index])) )
 {
 bool_t sync = !!(ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask);
 
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 449c64c..bedf13c 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -136,6 +136,9 @@ int arch_monitor_domctl_event(struct domain *d,
 if ( unlikely(mop->u.mov_to_cr.index > 31) )
 return -EINVAL;
 
+if ( unlikely(mop->u.mov_to_cr.pad1 || mop->u.mov_to_cr.pad2) )
+return -EINVAL;
+
 ctrlreg_bitmask = monitor_ctrlreg_bitmask(mop->u.mov_to_cr.index);
 old_status = !!(ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask);
 
@@ -155,9 +158,15 @@ int arch_monitor_domctl_event(struct domain *d,
 ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;
 
 if ( requested_status )
+{
+ad->monitor.write_ctrlreg_mask[mop->u.mov_to_cr.index] = mop->u.mov_to_cr.bitmask;
 ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
+}
 else
+{
+ad->monitor.write_ctrlreg_mask[mop->u.mov_to_cr.index] = 0;
 ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
+}
 
 if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
 {
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 924caac..27d80ee 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -406,6 +406,7 @@ struct arch_domain
 unsigned int cpuid_enabled   : 1;
 unsigned int descriptor_access_enabled   : 1;
 struct monitor_msr_bitmap *msr_bitmap;
+uint64_t write_ctrlreg_mask[4];
 } monitor;
 
 /* Mem_access emulation control */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index f7cbc0a..ff39762 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1107,6 +1107,14 @@ struct xen_domctl_monitor_op {
 uint8_t sync;
 /* Send event only on a change of value */
 uint8_t onchangeonly;
+/* 

[Xen-devel] [PATCH v3 0/2] write_ctrlreg event masking

2017-06-16 Thread Petre Pircalabu

This patchset enables masking the reception of write_ctrlreg events depending
on the value of certain bits in that register.
The most representative example is filtering out events generated when the
CR4.PGE bit is flipped (global TLB flushes).
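
As an illustration, a hypothetical caller would use the new libxc parameter
along these lines to suppress events caused purely by CR4.PGE toggles (xch
and domain_id assumed to be set up; compare the xen-access change in patch
2/2):

    /* Monitor CR4 writes, but not those that only flip CR4.PGE. */
    rc = xc_monitor_write_ctrlreg(xch, domain_id, VM_EVENT_X86_CR4,
                                  1 /* enable */, 1 /* sync */,
                                  X86_CR4_PGE /* bitmask */,
                                  1 /* onchangeonly */);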

---
Changed since v2
  * fix coding style.
  * use ARRAY_SIZE and named indexes for x86 ctrl register resolution.
  * add alignment padding for xen_domctl_monitor_op.

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v3 2/2] xen-access: write_ctrlreg_cr4 test

2017-06-16 Thread Petre Pircalabu
Add test for write_ctrlreg event handling.

Signed-off-by: Petre Pircalabu 
---
 tools/tests/xen-access/xen-access.c | 53 -
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 238011e..bbf5047 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -57,6 +57,13 @@
 #define X86_TRAP_DEBUG  1
 #define X86_TRAP_INT3   3
 
+/* From xen/include/asm-x86/x86-defns.h */
+#define X86_CR4_PGE        0x00000080 /* enable global pages */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
 typedef struct vm_event {
 domid_t domain_id;
 xenevtchn_handle *xce_handle;
@@ -314,6 +321,24 @@ static void get_request(vm_event_t *vm_event, vm_event_request_t *req)
 }
 
 /*
+ * X86 control register names
+ */
+static const char* get_x86_ctrl_reg_name(uint32_t index)
+{
+static const char* names[] = {
+[VM_EVENT_X86_CR0]  = "CR0",
+[VM_EVENT_X86_CR3]  = "CR3",
+[VM_EVENT_X86_CR4]  = "CR4",
+[VM_EVENT_X86_XCR0] = "XCR0",
+};
+
+if ( index >= ARRAY_SIZE(names) || names[index] == NULL )
+return "";
+
+return names[index];
+}
+
+/*
  * Note that this function is not thread safe.
  */
 static void put_response(vm_event_t *vm_event, vm_event_response_t *rsp)
@@ -337,7 +362,7 @@ void usage(char* progname)
 {
 fprintf(stderr, "Usage: %s [-m]  write|exec", progname);
 #if defined(__i386__) || defined(__x86_64__)
-fprintf(stderr, "|breakpoint|altp2m_write|altp2m_exec|debug|cpuid|desc_access");
+fprintf(stderr, "|breakpoint|altp2m_write|altp2m_exec|debug|cpuid|desc_access|write_ctrlreg_cr4");
 #elif defined(__arm__) || defined(__aarch64__)
 fprintf(stderr, "|privcall");
 #endif
@@ -369,6 +394,7 @@ int main(int argc, char *argv[])
 int debug = 0;
 int cpuid = 0;
 int desc_access = 0;
+int write_ctrlreg_cr4 = 0;
 uint16_t altp2m_view_id = 0;
 
 char* progname = argv[0];
@@ -439,6 +465,10 @@ int main(int argc, char *argv[])
 {
 desc_access = 1;
 }
+else if ( !strcmp(argv[0], "write_ctrlreg_cr4") )
+{
+write_ctrlreg_cr4 = 1;
+}
 #elif defined(__arm__) || defined(__aarch64__)
 else if ( !strcmp(argv[0], "privcall") )
 {
@@ -596,6 +626,18 @@ int main(int argc, char *argv[])
 }
 }
 
+if ( write_ctrlreg_cr4 )
+{
+/* Mask the CR4.PGE bit so no events will be generated for global TLB flushes. */
+rc = xc_monitor_write_ctrlreg(xch, domain_id, VM_EVENT_X86_CR4, 1, 1,
+  X86_CR4_PGE, 1);
+if ( rc < 0 )
+{
+ERROR("Error %d setting write control register trapping with vm_event\n", rc);
+goto exit;
+}
+}
+
 /* Wait for access */
 for (;;)
 {
@@ -806,6 +848,15 @@ int main(int argc, char *argv[])
req.u.desc_access.is_write);
 rsp.flags |= VM_EVENT_FLAG_EMULATE;
 break;
+case VM_EVENT_REASON_WRITE_CTRLREG:
+printf("Control register written: rip=%016"PRIx64", vcpu %d: "
+   "reg=%s, old_value=%016"PRIx64", 
new_value=%016"PRIx64"\n",
+   req.data.regs.x86.rip,
+   req.vcpu_id,
+   get_x86_ctrl_reg_name(req.u.write_ctrlreg.index),
+   req.u.write_ctrlreg.old_value,
+   req.u.write_ctrlreg.new_value);
+break;
 default:
 fprintf(stderr, "UNKNOWN REASON CODE %d\n", req.reason);
 }
-- 
2.7.4


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [RFC PATCH v3 10/10] arm/mem_access: Walk the guest's pt in software

2017-06-16 Thread Sergej Proskurin
Hi Tamas,

[...]


>> +
>> +if ( ((flag & GV2M_WRITE) == GV2M_WRITE) && !(perms & GV2M_WRITE) )
> 
> Wouldn't it be enough to do (flag & GV2M_WRITE) without the following
> comparison? Also, a comment explaining why this is an error-condition
> would be nice.
> 

Yes, you are absolutely correct: (flag & GV2M_WRITE) is already
sufficient. I will adapt the upper if-statement and add a comment in the
next version of my patch series.
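
I.e., the simplified check would read (sketch):

    if ( (flag & GV2M_WRITE) && !(perms & GV2M_WRITE) )
        /* write requested on a non-writable mapping: error */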

Cheers,
~Sergej

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 36/36] x86/mm: Add support to make use of Secure Memory Encryption

2017-06-16 Thread Tom Lendacky
Add support to check if SME has been enabled and if memory encryption
should be activated (checking the command-line option against the
configured default state).  If memory encryption is to be activated,
then the encryption mask is set and the kernel is encrypted "in place."

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |6 ++-
 arch/x86/kernel/head64.c   |4 +-
 arch/x86/mm/mem_encrypt.c  |   86 +++-
 3 files changed, 90 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 7da6de3..aac9ed9 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -17,6 +17,8 @@
 
 #include 
 
+#include 
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern unsigned long sme_me_mask;
@@ -37,7 +39,7 @@ void __init sme_early_decrypt(resource_size_t paddr,
 
 void __init sme_early_init(void);
 
-void __init sme_enable(void);
+void __init sme_enable(struct boot_params *bp);
 
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);
@@ -58,7 +60,7 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 
 static inline void __init sme_early_init(void) { }
 
-static inline void __init sme_enable(void) { }
+static inline void __init sme_enable(struct boot_params *bp) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 9e94ed2..1ff2e98 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -43,7 +43,7 @@ static void __init *fixup_pointer(void *ptr, unsigned long physaddr)
return ptr - (void *)_text + (void *)physaddr;
 }
 
-void __init __startup_64(unsigned long physaddr)
+void __init __startup_64(unsigned long physaddr, struct boot_params *bp)
 {
unsigned long load_delta, *p;
unsigned long pgtable_flags;
@@ -68,7 +68,7 @@ void __init __startup_64(unsigned long physaddr)
for (;;);
 
/* Activate Secure Memory Encryption (SME) if supported and enabled */
-   sme_enable();
+   sme_enable(bp);
 
/* Include the SME encryption mask in the fixup value */
load_delta += sme_get_me_mask();
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 6e87662..13f780e 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -13,19 +13,34 @@
 #include 
 #include 
 
+#include 
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+
+/*
+ * Some SME functions run very early causing issues with the stack-protector
+ * support. Provide a way to turn off this support on a per-function basis.
+ */
+#define SME_NOSTACKP __attribute__((__optimize__("no-stack-protector")))
+
+static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+static char sme_cmdline_on[]  __initdata = "on";
+static char sme_cmdline_off[] __initdata = "off";
 
 /*
  * Since SME related variables are set early in the boot process they must
@@ -200,6 +215,8 @@ void __init mem_encrypt_init(void)
 
/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
swiotlb_update_mem_attributes();
+
+   pr_info("AMD Secure Memory Encryption (SME) active\n");
 }
 
 void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
@@ -527,8 +544,73 @@ void __init sme_encrypt_kernel(void)
native_write_cr3(__native_read_cr3());
 }
 
-void __init sme_enable(void)
+void __init SME_NOSTACKP sme_enable(struct boot_params *bp)
 {
+   const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+   unsigned int eax, ebx, ecx, edx;
+   bool active_by_default;
+   unsigned long me_mask;
+   char buffer[16];
+   u64 msr;
+
+   /* Check for the SME support leaf */
+   eax = 0x80000000;
+   ecx = 0;
+   native_cpuid(, , , );
+   if (eax < 0x8000001f)
+   return;
+
+   /*
+* Check for the SME feature:
+*   CPUID Fn8000_001F[EAX] - Bit 0
+* Secure Memory Encryption support
+*   CPUID Fn8000_001F[EBX] - Bits 5:0
+* Pagetable bit position used to indicate encryption
+*/
+   eax = 0x8000001f;
+   ecx = 0;
+   native_cpuid(, , , );
+   if (!(eax & 1))
+   return;
+
+   me_mask = 1UL << (ebx & 0x3f);
+
+   /* Check if SME is enabled */
+   msr = __rdmsr(MSR_K8_SYSCFG);
+   if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+   return;
+
+   /*
+* Fixups have not been applied to phys_base yet and we're running
+* identity mapped, so we must obtain the address to the SME command
+* line argument data using rip-relative addressing.
+*/
+   asm ("lea sme_cmdline_arg(%%rip), %0"
+: "=r" (cmdline_arg)
+

[Xen-devel] [PATCH v7 34/36] x86/mm: Add support to encrypt the kernel in-place

2017-06-16 Thread Tom Lendacky
Add the support to encrypt the kernel in-place. This is done by creating
new page mappings for the kernel - a decrypted write-protected mapping
and an encrypted mapping. The kernel is encrypted by copying it through
a temporary buffer.
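
The overall flow is roughly as follows (a sketch of the steps, not the
exact code):

    /*
     * sme_encrypt_kernel(), roughly:
     *  1. Build an intermediate page table with two mappings of the
     *     kernel: one encrypted, one decrypted and write-protected,
     *     plus a non-encrypted workarea for the copy routine.
     *  2. Switch to the intermediate page table.
     *  3. sme_encrypt_execute(): copy the kernel from the decrypted
     *     mapping, through the temporary buffer, into the encrypted
     *     mapping.
     *  4. Switch back to the original page table and flush TLB/caches.
     */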

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |6 +
 arch/x86/mm/Makefile   |2 
 arch/x86/mm/mem_encrypt.c  |  314 
 arch/x86/mm/mem_encrypt_boot.S |  150 +
 4 files changed, 472 insertions(+)
 create mode 100644 arch/x86/mm/mem_encrypt_boot.S

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index af835cf..7da6de3 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,6 +21,12 @@
 
 extern unsigned long sme_me_mask;
 
+void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
+unsigned long decrypted_kernel_vaddr,
+unsigned long kernel_len,
+unsigned long encryption_wa,
+unsigned long encryption_pgd);
+
 void __init sme_early_encrypt(resource_size_t paddr,
  unsigned long size);
 void __init sme_early_decrypt(resource_size_t paddr,
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9e13841..0633142 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -38,3 +38,5 @@ obj-$(CONFIG_NUMA_EMU)+= numa_emulation.o
 obj-$(CONFIG_X86_INTEL_MPX)+= mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
 obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
+
+obj-$(CONFIG_AMD_MEM_ENCRYPT)  += mem_encrypt_boot.o
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 842c8a6..6e87662 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -24,6 +24,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 /*
  * Since SME related variables are set early in the boot process they must
@@ -209,8 +211,320 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
 
+static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
+unsigned long end)
+{
+   unsigned long pgd_start, pgd_end, pgd_size;
+   pgd_t *pgd_p;
+
+   pgd_start = start & PGDIR_MASK;
+   pgd_end = end & PGDIR_MASK;
+
+   pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
+   pgd_size *= sizeof(pgd_t);
+
+   pgd_p = pgd_base + pgd_index(start);
+
+   memset(pgd_p, 0, pgd_size);
+}
+
+#ifndef CONFIG_X86_5LEVEL
+#define native_make_p4d(_x)    (p4d_t) { .pgd = native_make_pgd(_x) }
+#endif
+
+#define PGD_FLAGS  _KERNPG_TABLE_NOENC
+#define P4D_FLAGS  _KERNPG_TABLE_NOENC
+#define PUD_FLAGS  _KERNPG_TABLE_NOENC
+#define PMD_FLAGS  (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area,
+unsigned long vaddr, pmdval_t pmd_val)
+{
+   pgd_t *pgd_p;
+   p4d_t *p4d_p;
+   pud_t *pud_p;
+   pmd_t *pmd_p;
+
+   pgd_p = pgd_base + pgd_index(vaddr);
+   if (native_pgd_val(*pgd_p)) {
+   if (IS_ENABLED(CONFIG_X86_5LEVEL))
+   p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+   else
+   pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
+   } else {
+   pgd_t pgd;
+
+   if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+   p4d_p = pgtable_area;
+   memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+   pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
+
+   pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
+   } else {
+   pud_p = pgtable_area;
+   memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+   pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+
+   pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
+   }
+   native_set_pgd(pgd_p, pgd);
+   }
+
+   if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+   p4d_p += p4d_index(vaddr);
+   if (native_p4d_val(*p4d_p)) {
+   pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
+   } else {
+   p4d_t p4d;
+
+   pud_p = pgtable_area;
+   memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
+   pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
+
+   p4d = native_make_p4d((p4dval_t)pud_p + P4D_FLAGS);
+   native_set_p4d(p4d_p, p4d);
+   }
+   }
+
+   pud_p += pud_index(vaddr);
+   if 

[Xen-devel] [PATCH v7 35/36] x86/boot: Add early cmdline parsing for options with arguments

2017-06-16 Thread Tom Lendacky
Add a cmdline_find_option() function to look for cmdline options that
take arguments. The argument is returned in a supplied buffer and the
argument length (regardless of whether it fits in the supplied buffer)
is returned, with -1 indicating not found.
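
A hypothetical caller (the helper name and buffer size are illustrative;
compare the use in patch 36/36):

    static bool mem_encrypt_requested(const char *cmdline_ptr)
    {
        char buffer[16];
        int len;

        len = cmdline_find_option(cmdline_ptr, "mem_encrypt",
                                  buffer, sizeof(buffer));
        return len == 2 && !strncmp(buffer, "on", 2);
    }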

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/cmdline.h |2 +
 arch/x86/lib/cmdline.c |  105 
 2 files changed, 107 insertions(+)

diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
index e01f7f7..84ae170 100644
--- a/arch/x86/include/asm/cmdline.h
+++ b/arch/x86/include/asm/cmdline.h
@@ -2,5 +2,7 @@
 #define _ASM_X86_CMDLINE_H
 
 int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
+int cmdline_find_option(const char *cmdline_ptr, const char *option,
+   char *buffer, int bufsize);
 
 #endif /* _ASM_X86_CMDLINE_H */
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index 5cc78bf..3261abb 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -104,7 +104,112 @@ static inline int myisspace(u8 c)
return 0;   /* Buffer overrun */
 }
 
+/*
+ * Find a non-boolean option (i.e. option=argument). In accordance with
+ * standard Linux practice, if this option is repeated, this returns the
+ * last instance on the command line.
+ *
+ * @cmdline: the cmdline string
+ * @max_cmdline_size: the maximum size of cmdline
+ * @option: option string to look for
+ * @buffer: memory buffer to return the option argument
+ * @bufsize: size of the supplied memory buffer
+ *
+ * Returns the length of the argument (regardless of if it was
+ * truncated to fit in the buffer), or -1 on not found.
+ */
+static int
+__cmdline_find_option(const char *cmdline, int max_cmdline_size,
+ const char *option, char *buffer, int bufsize)
+{
+   char c;
+   int pos = 0, len = -1;
+   const char *opptr = NULL;
+   char *bufptr = buffer;
+   enum {
+   st_wordstart = 0,   /* Start of word/after whitespace */
+   st_wordcmp, /* Comparing this word */
+   st_wordskip,/* Miscompare, skip */
+   st_bufcpy,  /* Copying this to buffer */
+   } state = st_wordstart;
+
+   if (!cmdline)
+   return -1;  /* No command line */
+
+   /*
+* This 'pos' check ensures we do not overrun
+* a non-NULL-terminated 'cmdline'
+*/
+   while (pos++ < max_cmdline_size) {
+   c = *(char *)cmdline++;
+   if (!c)
+   break;
+
+   switch (state) {
+   case st_wordstart:
+   if (myisspace(c))
+   break;
+
+   state = st_wordcmp;
+   opptr = option;
+   /* fall through */
+
+   case st_wordcmp:
+   if ((c == '=') && !*opptr) {
+   /*
+* We matched all the way to the end of the
+* option we were looking for, prepare to
+* copy the argument.
+*/
+   len = 0;
+   bufptr = buffer;
+   state = st_bufcpy;
+   break;
+   } else if (c == *opptr++) {
+   /*
+* We are currently matching, so continue
+* to the next character on the cmdline.
+*/
+   break;
+   }
+   state = st_wordskip;
+   /* fall through */
+
+   case st_wordskip:
+   if (myisspace(c))
+   state = st_wordstart;
+   break;
+
+   case st_bufcpy:
+   if (myisspace(c)) {
+   state = st_wordstart;
+   } else {
+   /*
+* Increment len, but don't overrun the
+* supplied buffer and leave room for the
+* NULL terminator.
+*/
+   if (++len < bufsize)
+   *bufptr++ = c;
+   }
+   break;
+   }
+   }
+
+   if (bufsize)
+   *bufptr = '\0';
+
+   return len;
+}
+
 int cmdline_find_option_bool(const char *cmdline, const char *option)
 {
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
 }
+
+int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
+ 

[Xen-devel] [PATCH v7 33/36] x86/mm: Use proper encryption attributes with /dev/mem

2017-06-16 Thread Tom Lendacky
When accessing memory using /dev/mem (or /dev/kmem) use the proper
encryption attributes when mapping the memory.

To ensure the proper attributes are applied when reading or writing
/dev/mem, update the xlate_dev_mem_ptr() function to use memremap(),
which will essentially perform the same steps: applying __va for RAM,
or using ioremap() if not RAM.

To ensure the proper attributes are applied when mmapping /dev/mem,
update phys_mem_access_prot() to call phys_mem_access_encrypted(), a
new function which checks if the memory should be mapped encrypted or
not. If not, the VMA protection value is updated to remove the
encryption bit.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/io.h |3 +++
 arch/x86/mm/ioremap.c |   18 +-
 arch/x86/mm/pat.c |3 +++
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 09c5557..e080a39 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -386,4 +386,7 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset,
unsigned long flags);
 #define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 
+extern bool phys_mem_access_encrypted(unsigned long phys_addr,
+ unsigned long size);
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0254b78..5d8b3cf 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -404,12 +404,10 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
unsigned long offset = phys & ~PAGE_MASK;
void *vaddr;
 
-   /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
-   if (page_is_ram(start >> PAGE_SHIFT))
-   return __va(phys);
+   /* memremap() maps if RAM, otherwise falls back to ioremap() */
+   vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);
 
-   vaddr = ioremap_cache(start, PAGE_SIZE);
-   /* Only add the offset on success and return NULL if the ioremap() failed: */
+   /* Only add the offset on success and return NULL if memremap() failed */
if (vaddr)
vaddr += offset;
 
@@ -418,10 +416,7 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 {
-   if (page_is_ram(phys >> PAGE_SHIFT))
-   return;
-
-   iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+   memunmap((void *)((unsigned long)addr & PAGE_MASK));
 }
 
 /*
@@ -630,6 +625,11 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
return prot;
 }
 
+bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+{
+   return arch_memremap_can_ram_remap(phys_addr, size, 0);
+}
+
 #ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
 /* Remap memory with encryption */
 void __init *early_memremap_encrypted(resource_size_t phys_addr,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6753d9c..b970c95 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -748,6 +748,9 @@ void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
 {
+   if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
+   vma_prot = pgprot_decrypted(vma_prot);
+
return vma_prot;
 }
 


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 27/36] iommu/amd: Allow the AMD IOMMU to work with memory encryption

2017-06-16 Thread Tom Lendacky
The IOMMU is programmed with physical addresses for the various tables
and buffers that are used to communicate between the device and the
driver. When the driver allocates this memory it is encrypted. In order
for the IOMMU to access the memory as encrypted the encryption mask needs
to be included in these physical addresses during configuration.

The PTE entries created by the IOMMU should also include the encryption
mask so that when the device behind the IOMMU performs a DMA, the DMA
will be performed to encrypted memory.
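
The helpers used below are essentially thin wrappers that apply or strip
the encryption mask (a sketch of the amd_iommu_proto.h additions, which
this excerpt does not show in full):

    static inline u64 iommu_virt_to_phys(void *vaddr)
    {
            return (u64)__sme_set(virt_to_phys(vaddr));
    }

    static inline void *iommu_phys_to_virt(unsigned long paddr)
    {
            return phys_to_virt(__sme_clr(paddr));
    }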

Signed-off-by: Tom Lendacky 
---
 drivers/iommu/amd_iommu.c   |   30 --
 drivers/iommu/amd_iommu_init.c  |   34 --
 drivers/iommu/amd_iommu_proto.h |   10 ++
 drivers/iommu/amd_iommu_types.h |2 +-
 4 files changed, 55 insertions(+), 21 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5..912008c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -544,7 +544,7 @@ static void dump_dte_entry(u16 devid)
 
 static void dump_command(unsigned long phys_addr)
 {
-   struct iommu_cmd *cmd = phys_to_virt(phys_addr);
+   struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
int i;
 
for (i = 0; i < 4; ++i)
@@ -865,11 +865,13 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
 
 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+   u64 paddr = iommu_virt_to_phys((void *)address);
+
WARN_ON(address & 0x7ULL);
 
memset(cmd, 0, sizeof(*cmd));
-   cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
-   cmd->data[1] = upper_32_bits(__pa(address));
+   cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
+   cmd->data[1] = upper_32_bits(paddr);
cmd->data[2] = 1;
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
@@ -1328,7 +1330,7 @@ static bool increase_address_space(struct protection_domain *domain,
return false;
 
*pte = PM_LEVEL_PDE(domain->mode,
-   virt_to_phys(domain->pt_root));
+   iommu_virt_to_phys(domain->pt_root));
domain->pt_root  = pte;
domain->mode+= 1;
domain->updated  = true;
@@ -1365,7 +1367,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
if (!page)
return NULL;
 
-   __npte = PM_LEVEL_PDE(level, virt_to_phys(page));
+   __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
 
/* pte could have been changed somewhere. */
if (cmpxchg64(pte, __pte, __npte) != __pte) {
@@ -1481,10 +1483,10 @@ static int iommu_map_page(struct protection_domain *dom,
return -EBUSY;
 
if (count > 1) {
-   __pte = PAGE_SIZE_PTE(phys_addr, page_size);
+   __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
-   __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
+   __pte = __sme_set(phys_addr) | IOMMU_PTE_P | IOMMU_PTE_FC;
 
if (prot & IOMMU_PROT_IR)
__pte |= IOMMU_PTE_IR;
@@ -1700,7 +1702,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
 
-   ptr = __va(tbl[i] & PAGE_MASK);
+   ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
free_page((unsigned long)ptr);
}
@@ -1715,7 +1717,7 @@ static void free_gcr3_tbl_level2(u64 *tbl)
if (!(tbl[i] & GCR3_VALID))
continue;
 
-   ptr = __va(tbl[i] & PAGE_MASK);
+   ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
 
free_gcr3_tbl_level1(ptr);
}
@@ -1807,7 +1809,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
u64 flags = 0;
 
if (domain->mode != PAGE_MODE_NONE)
-   pte_root = virt_to_phys(domain->pt_root);
+   pte_root = iommu_virt_to_phys(domain->pt_root);
 
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
@@ -1819,7 +1821,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
flags |= DTE_FLAG_IOTLB;
 
if (domain->flags & PD_IOMMUV2_MASK) {
-   u64 gcr3 = __pa(domain->gcr3_tbl);
+   u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
u64 glx  = domain->glx;
u64 tmp;
 
@@ -3470,10 +3472,10 @@ static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
if (root == NULL)
return NULL;
 
-  

[Xen-devel] [PATCH v7 32/36] xen/x86: Remove SME feature in PV guests

2017-06-16 Thread Tom Lendacky
Xen does not currently support SME for PV guests. Clear the SME cpu
capability in order to avoid any ambiguity.

Signed-off-by: Tom Lendacky 
---
 arch/x86/xen/enlighten_pv.c |1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index f33eef4..e6ecf42 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -294,6 +294,7 @@ static void __init xen_init_capabilities(void)
setup_clear_cpu_cap(X86_FEATURE_MTRR);
setup_clear_cpu_cap(X86_FEATURE_ACC);
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+   setup_clear_cpu_cap(X86_FEATURE_SME);
 
if (!xen_initial_domain())
setup_clear_cpu_cap(X86_FEATURE_ACPI);


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 30/36] kvm: x86: svm: Support Secure Memory Encryption within KVM

2017-06-16 Thread Tom Lendacky
Update the KVM support to work with SME. The VMCB has a number of fields
where physical addresses are used and these addresses must contain the
memory encryption mask in order to properly access the encrypted memory.
Also, use the memory encryption mask when creating and using the nested
page tables.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/kvm_host.h |2 +-
 arch/x86/kvm/mmu.c  |   12 
 arch/x86/kvm/mmu.h  |2 +-
 arch/x86/kvm/svm.c  |   35 ++-
 arch/x86/kvm/vmx.c  |3 ++-
 arch/x86/kvm/x86.c  |3 ++-
 6 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 695605e..6d1267f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1069,7 +1069,7 @@ struct kvm_arch_async_pf {
 void kvm_mmu_uninit_vm(struct kvm *kvm);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
-   u64 acc_track_mask);
+   u64 acc_track_mask, u64 me_mask);
 
 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cb82259..e85888c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -107,7 +107,7 @@ enum {
(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 
 
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
+#define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
 #define PT64_DIR_BASE_ADDR_MASK \
(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 #define PT64_LVL_ADDR_MASK(level) \
@@ -125,7 +125,7 @@ enum {
* PT32_LEVEL_BITS))) - 1))
 
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
-   | shadow_x_mask | shadow_nx_mask)
+   | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
 
 #define ACC_EXEC_MASK1
 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
@@ -184,6 +184,7 @@ struct kvm_shadow_walk_iterator {
 static u64 __read_mostly shadow_dirty_mask;
 static u64 __read_mostly shadow_mmio_mask;
 static u64 __read_mostly shadow_present_mask;
+static u64 __read_mostly shadow_me_mask;
 
 /*
  * The mask/value to distinguish a PTE that has been marked not-present for
@@ -317,7 +318,7 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
-   u64 acc_track_mask)
+   u64 acc_track_mask, u64 me_mask)
 {
if (acc_track_mask != 0)
acc_track_mask |= SPTE_SPECIAL_MASK;
@@ -330,6 +331,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
shadow_present_mask = p_mask;
shadow_acc_track_mask = acc_track_mask;
WARN_ON(shadow_accessed_mask != 0 && shadow_acc_track_mask != 0);
+   shadow_me_mask = me_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -2398,7 +2400,8 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
 
spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
-  shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
+  shadow_user_mask | shadow_x_mask | shadow_accessed_mask |
+  shadow_me_mask;
 
mmu_spte_set(sptep, spte);
 
@@ -2700,6 +2703,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
pte_access &= ~ACC_WRITE_MASK;
 
spte |= (u64)pfn << PAGE_SHIFT;
+   spte |= shadow_me_mask;
 
if (pte_access & ACC_WRITE_MASK) {
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 330bf3a..08b779d 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -48,7 +48,7 @@
 
 static inline u64 rsvd_bits(int s, int e)
 {
-   return ((1ULL << (e - s + 1)) - 1) << s;
+   return __sme_clr(((1ULL << (e - s + 1)) - 1) << s);
 }
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ba9891a..d2e9fca 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1138,9 +1138,9 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
 {
struct vmcb *vmcb = svm->vmcb;
 struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
-   phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
-   phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
-   phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);
+   phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
+   phys_addr_t lpa = 

[Xen-devel] [PATCH v7 31/36] x86/mm, kexec: Allow kexec to be used with SME

2017-06-16 Thread Tom Lendacky
Provide support so that kexec can be used to boot a kernel when SME is
enabled.

Support is needed to allocate pages for kexec without encryption.  This
is needed in order to be able to boot the new kernel in the same manner
as the original kernel was booted.

Additionally, when shutting down all of the CPUs we need to be sure to
flush the caches and then halt. This is needed when booting from a state
where SME was not active into a state where SME is active (or vice-versa).
Without these steps, it is possible for cache lines to exist for the same
physical location but tagged both with and without the encryption bit. This
can cause random memory corruption when caches are flushed depending on
which cacheline is written last.
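
In code terms, the CPU shutdown path ends up doing something like this
sketch (the actual change is to stop_this_cpu() in process.c):

    /* Flush cache lines that may be tagged with a stale C-bit state,
     * then halt without writing to memory again. */
    if (boot_cpu_has(X86_FEATURE_SME))
            native_wbinvd();
    for (;;)
            native_halt();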

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/init.h  |1 +
 arch/x86/include/asm/kexec.h |8 
 arch/x86/include/asm/pgtable_types.h |1 +
 arch/x86/kernel/machine_kexec_64.c   |   22 +-
 arch/x86/kernel/process.c|   17 +++--
 arch/x86/mm/ident_map.c  |   12 
 include/linux/kexec.h|   14 ++
 kernel/kexec_core.c  |   12 +++-
 8 files changed, 79 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 474eb8c..05c4aa0 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -7,6 +7,7 @@ struct x86_mapping_info {
unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset;/* ident mapping offset */
bool direct_gbpages; /* PUD level 1GB page support */
+   unsigned long kernpg_flag;   /* kernel pagetable flag override */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 70ef205..e8183ac 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -207,6 +207,14 @@ struct kexec_entry64_regs {
uint64_t r15;
uint64_t rip;
 };
+
+extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
+  gfp_t gfp);
+#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages
+
+extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
+#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
+
 #endif
 
 typedef void crash_vmclear_fn(void);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 32095af..830992f 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -213,6 +213,7 @@ enum page_cache_mode {
 #define PAGE_KERNEL__pgprot(__PAGE_KERNEL | _PAGE_ENC)
 #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
 #define PAGE_KERNEL_EXEC   __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC_NOENC __pgprot(__PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
 #define PAGE_KERNEL_NOCACHE__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE  __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index cb0a304..9cf8daa 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -87,7 +87,7 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
}
pte = pte_offset_kernel(pmd, vaddr);
-   set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+   set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
return 0;
 err:
free_transition_pgtable(image);
@@ -115,6 +115,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
.alloc_pgt_page = alloc_pgt_page,
.context= image,
.page_flag  = __PAGE_KERNEL_LARGE_EXEC,
+   .kernpg_flag= _KERNPG_TABLE_NOENC,
};
unsigned long mstart, mend;
pgd_t *level4p;
@@ -602,3 +603,22 @@ void arch_kexec_unprotect_crashkres(void)
 {
kexec_mark_crashkres(false);
 }
+
+int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
+{
+   /*
+* If SME is active we need to be sure that kexec pages are
+* not encrypted because when we boot to the new kernel the
+* pages won't be accessed encrypted (initially).
+*/
+   return set_memory_decrypted((unsigned long)vaddr, pages);
+}
+
+void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
+{
+   /*
+* If SME is active we need to reset the pages back to being
+* an encrypted mapping before freeing them.
+*/
+   set_memory_encrypted((unsigned 

[Xen-devel] [PATCH v7 29/36] x86, drm, fbdev: Do not specify encrypted memory for video mappings

2017-06-16 Thread Tom Lendacky
Since video memory needs to be accessed decrypted, be sure that the
memory encryption mask is not set for the video ranges.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/vga.h   |   14 +-
 arch/x86/mm/pageattr.c   |2 ++
 drivers/gpu/drm/drm_gem.c|2 ++
 drivers/gpu/drm/drm_vm.c |4 
 drivers/gpu/drm/ttm/ttm_bo_vm.c  |7 +--
 drivers/gpu/drm/udl/udl_fb.c |4 
 drivers/video/fbdev/core/fbmem.c |   12 
 7 files changed, 42 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h
index c4b9dc2..9f42bee 100644
--- a/arch/x86/include/asm/vga.h
+++ b/arch/x86/include/asm/vga.h
@@ -7,12 +7,24 @@
 #ifndef _ASM_X86_VGA_H
 #define _ASM_X86_VGA_H
 
+#include 
+
 /*
  * On the PC, we can just recalculate addresses and then
  * access the videoram directly without any black magic.
+ * To support memory encryption however, we need to access
+ * the videoram as decrypted memory.
  */
 
-#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#define VGA_MAP_MEM(x, s)  \
+({ \
+   unsigned long start = (unsigned long)phys_to_virt(x);   \
+   \
+   if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) \
+   set_memory_decrypted(start, (s) >> PAGE_SHIFT); \
+   \
+   start;  \
+})
 
 #define vga_readb(x) (*(x))
 #define vga_writeb(x, y) (*(y) = (x))
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d9e09fb..13fc5db 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1825,11 +1825,13 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 {
return __set_memory_enc_dec(addr, numpages, true);
 }
+EXPORT_SYMBOL_GPL(set_memory_encrypted);
 
 int set_memory_decrypted(unsigned long addr, int numpages)
 {
return __set_memory_enc_dec(addr, numpages, false);
 }
+EXPORT_SYMBOL_GPL(set_memory_decrypted);
 
 int set_pages_uc(struct page *page, int numpages)
 {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b1e28c9..019f48c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -928,6 +929,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+   vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 
/* Take a ref for this mapping of the object, so that the fault
 * handler can dereference the mmap offset's pointer to the object.
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 1170b32..ed4bcbf 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -40,6 +40,7 @@
 #include 
 #include 
 #endif
+#include 
 #include 
 #include "drm_internal.h"
 #include "drm_legacy.h"
@@ -58,6 +59,9 @@ static pgprot_t drm_io_prot(struct drm_local_map *map,
 {
pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
+   /* We don't want graphics memory to be mapped encrypted */
+   tmp = pgprot_decrypted(tmp);
+
 #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
tmp = pgprot_noncached(tmp);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 9f53df9..622dab6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
@@ -230,9 +231,11 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 * first page.
 */
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-   if (bo->mem.bus.is_iomem)
+   if (bo->mem.bus.is_iomem) {
+   /* Iomem should not be marked encrypted */
+   cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = bdev->driver->io_mem_pfn(bo, page_offset);
-   else {
+   } else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 4a65003..92e1690 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 

[Xen-devel] [PATCH v7 28/36] x86, realmode: Check for memory encryption on the APs

2017-06-16 Thread Tom Lendacky
Add support to check if memory encryption is active in the kernel and that
it has been enabled on the AP. If memory encryption is active in the kernel
but has not been enabled on the AP, then set the memory encryption bit (bit
23) of MSR_K8_SYSCFG to enable memory encryption on that AP and allow the
AP to continue start up.
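
The trampoline assembly below amounts to this C sketch (illustration
only; the real check runs in 32-bit startup code before paging is
enabled):

    if (trampoline_header->flags & TH_FLAGS_SME_ACTIVE) {
            u64 msr;

            rdmsrl(MSR_K8_SYSCFG, msr);
            if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                    wrmsrl(MSR_K8_SYSCFG,
                           msr | MSR_K8_SYSCFG_MEM_ENCRYPT);
    }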

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/realmode.h  |   12 
 arch/x86/realmode/init.c |4 
 arch/x86/realmode/rm/trampoline_64.S |   24 
 3 files changed, 40 insertions(+)

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 230e190..90d9152 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -1,6 +1,15 @@
 #ifndef _ARCH_X86_REALMODE_H
 #define _ARCH_X86_REALMODE_H
 
+/*
+ * Flag bit definitions for use with the flags field of the trampoline header
+ * in the CONFIG_X86_64 variant.
+ */
+#define TH_FLAGS_SME_ACTIVE_BIT0
+#define TH_FLAGS_SME_ACTIVEBIT(TH_FLAGS_SME_ACTIVE_BIT)
+
+#ifndef __ASSEMBLY__
+
 #include 
 #include 
 
@@ -38,6 +47,7 @@ struct trampoline_header {
u64 start;
u64 efer;
u32 cr4;
+   u32 flags;
 #endif
 };
 
@@ -69,4 +79,6 @@ static inline size_t real_mode_size_needed(void)
 void set_real_mode_mem(phys_addr_t mem, size_t size);
 void reserve_real_mode(void);
 
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index d6ddc7e..1f71980 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -108,6 +108,10 @@ static void __init setup_real_mode(void)
trampoline_cr4_features = _header->cr4;
*trampoline_cr4_features = mmu_cr4_features;
 
+   trampoline_header->flags = 0;
+   if (sme_active())
+   trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;
+
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_top_pgt[511].pgd;
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index dac7b20..614fd70 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -30,6 +30,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "realmode.h"
 
.text
@@ -92,6 +93,28 @@ ENTRY(startup_32)
movl%edx, %fs
movl%edx, %gs
 
+   /*
+* Check for memory encryption support. This is a safety net in
+* case BIOS hasn't done the necessary step of setting the bit in
+* the MSR for this AP. If SME is active and we've gotten this far
+* then it is safe for us to set the MSR bit and continue. If we
+* don't we'll eventually crash trying to execute encrypted
+* instructions.
+*/
+   bt  $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+   jnc .Ldone
+   movl$MSR_K8_SYSCFG, %ecx
+   rdmsr
+   bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+   jc  .Ldone
+
+   /*
+* Memory encryption is enabled but the SME enable bit for this
+* CPU has not been set.  It is safe to set it, so do so.
+*/
+   wrmsr
+.Ldone:
+
movlpa_tr_cr4, %eax
movl%eax, %cr4  # Enable PAE mode
 
@@ -147,6 +170,7 @@ GLOBAL(trampoline_header)
tr_start:   .space  8
GLOBAL(tr_efer) .space  8
GLOBAL(tr_cr4)  .space  4
+   GLOBAL(tr_flags).space  4
 END(trampoline_header)
 
 #include "trampoline_common.S"


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 26/36] x86/CPU/AMD: Make the microcode level available earlier in the boot

2017-06-16 Thread Tom Lendacky
Move the setting of the cpuinfo_x86.microcode field from init_amd() to
early_init_amd() so that it is available earlier in the boot process. This
avoids having to read MSR_AMD64_PATCH_LEVEL directly during early boot.

Signed-off-by: Tom Lendacky 
---
 arch/x86/kernel/cpu/amd.c |8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5bdcbd4..fdcf305 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -547,8 +547,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void early_init_amd(struct cpuinfo_x86 *c)
 {
+   u32 dummy;
+
early_init_amd_mc(c);
 
+   rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
/*
 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
 * with P/T states and does not stop in deep C-states
@@ -746,8 +750,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 
 static void init_amd(struct cpuinfo_x86 *c)
 {
-   u32 dummy;
-
early_init_amd(c);
 
/*
@@ -809,8 +811,6 @@ static void init_amd(struct cpuinfo_x86 *c)
if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);
 
-   rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-
/* 3DNow or LM implies PREFETCHW */
if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 25/36] swiotlb: Add warnings for use of bounce buffers with SME

2017-06-16 Thread Tom Lendacky
Add warnings to let the user know when bounce buffers are being used for
DMA when SME is active.  Since the bounce buffers are not in encrypted
memory, these notifications are to allow the user to determine some
appropriate action - if necessary.  Actions can range from utilizing an
IOMMU, replacing the device with another device that can support 64-bit
DMA, ignoring the message if the device isn't used much, etc.
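
A worked example of the new sme_dma_mask() check (assuming the
encryption bit is bit 47, so sme_me_mask = 1ULL << 47):

    sme_dma_mask() = (sme_me_mask << 1) - 1 = 0xffffffffffff  /* 48 bits */

so a driver calling dma_set_mask(dev, DMA_BIT_MASK(32)) triggers the
warning, while a full 64-bit mask does not.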

Signed-off-by: Tom Lendacky 
---
 include/linux/dma-mapping.h |   11 +++
 include/linux/mem_encrypt.h |8 
 lib/swiotlb.c   |3 +++
 3 files changed, 22 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4f3eece..ee2307e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /**
  * List of possible attributes associated with a DMA mapping. The semantics
@@ -577,6 +578,11 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
+
+   /* Since mask is unsigned, this can only be true if SME is active */
+   if (mask < sme_dma_mask())
+   dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+
*dev->dma_mask = mask;
return 0;
 }
@@ -596,6 +602,11 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
if (!dma_supported(dev, mask))
return -EIO;
+
+   /* Since mask is unsigned, this can only be true if SME is active */
+   if (mask < sme_dma_mask())
+   dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
+
dev->coherent_dma_mask = mask;
return 0;
 }
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 837c66b..2168002 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -30,6 +30,14 @@ static inline bool sme_active(void)
return !!sme_me_mask;
 }
 
+static inline u64 sme_dma_mask(void)
+{
+   if (!sme_me_mask)
+   return 0ULL;
+
+   return ((u64)sme_me_mask << 1) - 1;
+}
+
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
  * the encryption mask from a value (e.g. when dealing with pagetable
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 04ac91a..8c6c83e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -507,6 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
if (no_iotlb_memory)
 panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
+   if (sme_active())
+   pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+
mask = dma_get_seg_boundary(hwdev);
 
tbl_dma_addr &= mask;


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 23/36] x86, realmode: Decrypt trampoline area if memory encryption is active

2017-06-16 Thread Tom Lendacky
When Secure Memory Encryption is enabled, the trampoline area must not
be encrypted. A CPU running in real mode will not be able to decrypt
memory that has been encrypted because it will not be able to use addresses
with the memory encryption mask.

Signed-off-by: Tom Lendacky 
---
 arch/x86/realmode/init.c |8 
 1 file changed, 8 insertions(+)

diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index cd4be19..d6ddc7e 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -1,6 +1,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -59,6 +60,13 @@ static void __init setup_real_mode(void)
 
base = (unsigned char *)real_mode_header;
 
+   /*
+* If SME is active, the trampoline area will need to be in
+* decrypted memory in order to bring up other processors
+* successfully.
+*/
+   set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
+
memcpy(base, real_mode_blob, size);
 
phys_base = __pa(base);


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 24/36] x86, swiotlb: Add memory encryption support

2017-06-16 Thread Tom Lendacky
Since DMA addresses will effectively look like 48-bit addresses when the
memory encryption mask is set, SWIOTLB is needed if the DMA mask of the
device performing the DMA does not support 48-bits. SWIOTLB will be
initialized to create decrypted bounce buffers for use by these devices.
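
An illustration of why (again assuming the encryption bit is bit 47):

    paddr         = 0x0000000123456000
    phys_to_dma() = 0x0000800123456000   /* bit 47 (sme_me_mask) set */

A device with, say, a 32-bit DMA mask cannot address this, so its DMA is
bounced through the decrypted SWIOTLB buffers.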

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/dma-mapping.h |5 ++-
 arch/x86/include/asm/mem_encrypt.h |5 +++
 arch/x86/kernel/pci-dma.c  |   11 +--
 arch/x86/kernel/pci-nommu.c|2 +
 arch/x86/kernel/pci-swiotlb.c  |   15 +-
 arch/x86/mm/mem_encrypt.c  |   22 +++
 include/linux/swiotlb.h|1 +
 init/main.c|   10 +++
 lib/swiotlb.c  |   54 +++-
 9 files changed, 108 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 08a0838..191f9a5 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef CONFIG_ISA
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,12 +63,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-   return paddr;
+   return __sme_set(paddr);
 }
 
 static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-   return daddr;
+   return __sme_clr(daddr);
 }
 #endif /* CONFIG_X86_DMA_REMAP */
 
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 3ffa5fa..af835cf 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -33,6 +33,11 @@ void __init sme_early_decrypt(resource_size_t paddr,
 
 void __init sme_enable(void);
 
+/* Architecture __weak replacement functions */
+void __init mem_encrypt_init(void);
+
+void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
+
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
 #define sme_me_mask0UL
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3a216ec..72d96d4 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -93,9 +93,12 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
if (gfpflags_allow_blocking(flag)) {
page = dma_alloc_from_contiguous(dev, count, get_order(size),
 flag);
-   if (page && page_to_phys(page) + size > dma_mask) {
-   dma_release_from_contiguous(dev, page, count);
-   page = NULL;
+   if (page) {
+   addr = phys_to_dma(dev, page_to_phys(page));
+   if (addr + size > dma_mask) {
+   dma_release_from_contiguous(dev, page, count);
+   page = NULL;
+   }
}
}
/* fallback */
@@ -104,7 +107,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
if (!page)
return NULL;
 
-   addr = page_to_phys(page);
+   addr = phys_to_dma(dev, page_to_phys(page));
if (addr + size > dma_mask) {
__free_pages(page, get_order(size));
 
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a88952e..98b576a 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -30,7 +30,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 enum dma_data_direction dir,
 unsigned long attrs)
 {
-   dma_addr_t bus = page_to_phys(page) + offset;
+   dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return DMA_ERROR_CODE;
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 1e23577..6770775 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -6,12 +6,14 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
 #include 
 #include 
 #include 
+
 int swiotlb __read_mostly;
 
 void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -79,8 +81,8 @@ int __init pci_swiotlb_detect_override(void)
  pci_swiotlb_late_init);
 
 /*
- * if 4GB or more detected (and iommu=off not set) return 1
- * and set swiotlb to 1.
+ * If 4GB or more detected (and iommu=off not set) or if SME is active
+ * then set swiotlb to 1 and return 1.
  */
 int __init pci_swiotlb_detect_4gb(void)
 {
@@ -89,6 +91,15 @@ int __init pci_swiotlb_detect_4gb(void)
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
swiotlb = 1;
 #endif
+
+   /*
+* If SME is 

[Xen-devel] [PATCH v7 22/36] x86/mm: Add support for changing the memory encryption attribute

2017-06-16 Thread Tom Lendacky
Add support for changing the memory encryption attribute for one or more
memory pages. This will be useful when we have to change the AP trampoline
area to not be encrypted. Or when we need to change the SWIOTLB area to
not be encrypted in support of devices that can't support the encryption
mask range.
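
As a sketch of how a caller might use the new interface (illustrative
only; everything here other than set_memory_decrypted() and
set_memory_encrypted() is hypothetical):

    vaddr = __get_free_pages(GFP_KERNEL, order);
    if (set_memory_decrypted(vaddr, 1 << order))
            goto err_free;                    /* attribute change failed */
    /* ... device accesses the buffer in the clear ... */
    set_memory_encrypted(vaddr, 1 << order);  /* restore before freeing */
    free_pages(vaddr, order);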

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/set_memory.h |3 ++
 arch/x86/mm/pageattr.c|   62 +
 2 files changed, 65 insertions(+)

diff --git a/arch/x86/include/asm/set_memory.h 
b/arch/x86/include/asm/set_memory.h
index eaec6c3..cd71273 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -11,6 +11,7 @@
  * Executability : eXeutable, NoteXecutable
  * Read/Write: ReadOnly, ReadWrite
  * Presence  : NotPresent
+ * Encryption: Encrypted, Decrypted
  *
  * Within a category, the attributes are mutually exclusive.
  *
@@ -42,6 +43,8 @@
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e7d3866..d9e09fb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1769,6 +1769,68 @@ int set_memory_4k(unsigned long addr, int numpages)
__pgprot(0), 1, 0, NULL);
 }
 
+static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+{
+   struct cpa_data cpa;
+   unsigned long start;
+   int ret;
+
+   /* Nothing to do if the SME is not active */
+   if (!sme_active())
+   return 0;
+
+   /* Should not be working on unaligned addresses */
+   if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
+   addr &= PAGE_MASK;
+
+   start = addr;
+
+   memset(&cpa, 0, sizeof(cpa));
+   cpa.vaddr = &addr;
+   cpa.numpages = numpages;
+   cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
+   cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
+   cpa.pgd = init_mm.pgd;
+
+   /* Must avoid aliasing mappings in the highmem code */
+   kmap_flush_unused();
+   vm_unmap_aliases();
+
+   /*
+* Before changing the encryption attribute, we need to flush caches.
+*/
+   if (static_cpu_has(X86_FEATURE_CLFLUSH))
+   cpa_flush_range(start, numpages, 1);
+   else
+   cpa_flush_all(1);
+
+   ret = __change_page_attr_set_clr(&cpa, 1);
+
+   /*
+* After changing the encryption attribute, we need to flush TLBs
+* again in case any speculative TLB caching occurred (but no need
+* to flush caches again).  We could just use cpa_flush_all(), but
+* in case TLB flushing gets optimized in the cpa_flush_range()
+* path use the same logic as above.
+*/
+   if (static_cpu_has(X86_FEATURE_CLFLUSH))
+   cpa_flush_range(start, numpages, 0);
+   else
+   cpa_flush_all(0);
+
+   return ret;
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+   return __set_memory_enc_dec(addr, numpages, true);
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+   return __set_memory_enc_dec(addr, numpages, false);
+}
+
 int set_pages_uc(struct page *page, int numpages)
 {
unsigned long addr = (unsigned long)page_address(page);


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 21/36] x86/mm: Add support to access persistent memory in the clear

2017-06-16 Thread Tom Lendacky
Persistent memory is expected to persist across reboots. The encryption
key used by SME will change across reboots which will result in corrupted
persistent memory.  Persistent memory is handed out by block devices
through memory remapping functions, so be sure not to map this memory as
encrypted.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/mm/ioremap.c |   31 ++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f3fa007..0254b78 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -428,17 +428,46 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
  * Examine the physical address to determine if it is an area of memory
  * that should be mapped decrypted.  If the memory is not part of the
  * kernel usable area it was accessed and created decrypted, so these
- * areas should be mapped decrypted.
+ * areas should be mapped decrypted. And since the encryption key can
+ * change across reboots, persistent memory should also be mapped
+ * decrypted.
  */
 static bool memremap_should_map_decrypted(resource_size_t phys_addr,
  unsigned long size)
 {
+   int is_pmem;
+
+   /*
+* Check if the address is part of a persistent memory region.
+* This check covers areas added by E820, EFI and ACPI.
+*/
+   is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
+   IORES_DESC_PERSISTENT_MEMORY);
+   if (is_pmem != REGION_DISJOINT)
+   return true;
+
+   /*
+* Check if the non-volatile attribute is set for an EFI
+* reserved area.
+*/
+   if (efi_enabled(EFI_BOOT)) {
+   switch (efi_mem_type(phys_addr)) {
+   case EFI_RESERVED_TYPE:
+   if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
+   return true;
+   break;
+   default:
+   break;
+   }
+   }
+
/* Check if the address is outside kernel usable area */
switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
case E820_TYPE_RESERVED:
case E820_TYPE_ACPI:
case E820_TYPE_NVS:
case E820_TYPE_UNUSABLE:
+   case E820_TYPE_PRAM:
return true;
default:
break;


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 20/36] x86, mpparse: Use memremap to map the mpf and mpc data

2017-06-16 Thread Tom Lendacky
The SMP MP-table is built by UEFI and placed in memory in a decrypted
state. These tables are accessed using a mix of early_memremap(),
early_memunmap(), phys_to_virt() and virt_to_phys(). Change all accesses
to use early_memremap()/early_memunmap(). This allows for proper setting
of the encryption mask so that the data can be successfully accessed when
SME is active.
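
The conversion follows the usual early-remap pattern; a minimal sketch
(the mapping calls are from the patch, surrounding code elided):

    struct mpf_intel *mpf = early_memremap(mpf_base, sizeof(*mpf));
    if (!mpf)
            return;
    /* ... read the table through the temporary mapping ... */
    early_memunmap(mpf, sizeof(*mpf));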

Signed-off-by: Tom Lendacky 
---
 arch/x86/kernel/mpparse.c |   98 -
 1 file changed, 70 insertions(+), 28 deletions(-)

diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index fd37f39..5cbb317 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -429,7 +429,7 @@ static inline void __init construct_default_ISA_mptable(int 
mpc_default_type)
}
 }
 
-static struct mpf_intel *mpf_found;
+static unsigned long mpf_base;
 
 static unsigned long __init get_mpc_size(unsigned long physptr)
 {
@@ -451,6 +451,7 @@ static int __init check_physptr(struct mpf_intel *mpf, 
unsigned int early)
 
size = get_mpc_size(mpf->physptr);
mpc = early_memremap(mpf->physptr, size);
+
/*
 * Read the physical hardware table.  Anything here will
 * override the defaults.
@@ -497,12 +498,12 @@ static int __init check_physptr(struct mpf_intel *mpf, 
unsigned int early)
  */
 void __init default_get_smp_config(unsigned int early)
 {
-   struct mpf_intel *mpf = mpf_found;
+   struct mpf_intel *mpf;
 
if (!smp_found_config)
return;
 
-   if (!mpf)
+   if (!mpf_base)
return;
 
if (acpi_lapic && early)
@@ -515,6 +516,12 @@ void __init default_get_smp_config(unsigned int early)
if (acpi_lapic && acpi_ioapic)
return;
 
+   mpf = early_memremap(mpf_base, sizeof(*mpf));
+   if (!mpf) {
+   pr_err("MPTABLE: error mapping MP table\n");
+   return;
+   }
+
pr_info("Intel MultiProcessor Specification v1.%d\n",
mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
@@ -529,7 +536,7 @@ void __init default_get_smp_config(unsigned int early)
/*
 * Now see if we need to read further.
 */
-   if (mpf->feature1 != 0) {
+   if (mpf->feature1) {
if (early) {
/*
 * local APIC has default address
@@ -542,8 +549,10 @@ void __init default_get_smp_config(unsigned int early)
construct_default_ISA_mptable(mpf->feature1);
 
} else if (mpf->physptr) {
-   if (check_physptr(mpf, early))
+   if (check_physptr(mpf, early)) {
+   early_memunmap(mpf, sizeof(*mpf));
return;
+   }
} else
BUG();
 
@@ -552,6 +561,8 @@ void __init default_get_smp_config(unsigned int early)
/*
 * Only use the first configuration found.
 */
+
+   early_memunmap(mpf, sizeof(*mpf));
 }
 
 static void __init smp_reserve_memory(struct mpf_intel *mpf)
@@ -561,15 +572,16 @@ static void __init smp_reserve_memory(struct mpf_intel 
*mpf)
 
 static int __init smp_scan_config(unsigned long base, unsigned long length)
 {
-   unsigned int *bp = phys_to_virt(base);
+   unsigned int *bp;
struct mpf_intel *mpf;
-   unsigned long mem;
+   int ret = 0;
 
apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
base, base + length - 1);
BUILD_BUG_ON(sizeof(*mpf) != 16);
 
while (length > 0) {
+   bp = early_memremap(base, length);
mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) &&
(mpf->length == 1) &&
@@ -579,24 +591,26 @@ static int __init smp_scan_config(unsigned long base, 
unsigned long length)
 #ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
 #endif
-   mpf_found = mpf;
+   mpf_base = base;
 
-   pr_info("found SMP MP-table at [mem %#010llx-%#010llx] 
mapped at [%p]\n",
-   (unsigned long long) virt_to_phys(mpf),
-   (unsigned long long) virt_to_phys(mpf) +
-   sizeof(*mpf) - 1, mpf);
+   pr_info("found SMP MP-table at [mem %#010lx-%#010lx] 
mapped at [%p]\n",
+   base, base + sizeof(*mpf) - 1, mpf);
 
-   mem = virt_to_phys(mpf);
-   memblock_reserve(mem, sizeof(*mpf));
+   memblock_reserve(base, sizeof(*mpf));
if (mpf->physptr)
smp_reserve_memory(mpf);
 
-   return 1;
+   ret = 1;
}
-   bp += 4;
+   

[Xen-devel] [PATCH v7 19/36] x86/mm: Add support to access boot related data in the clear

2017-06-16 Thread Tom Lendacky
Boot data (such as EFI related data) is not encrypted when the system is
booted because UEFI/BIOS does not run with SME active. In order to access
this data properly it needs to be mapped decrypted.

Update early_memremap() to provide an arch specific routine to modify the
pagetable protection attributes before they are applied to the new
mapping. This is used to remove the encryption mask for boot related data.

Update memremap() to provide an arch specific routine to determine if RAM
remapping is allowed.  RAM remapping will cause an encrypted mapping to be
generated. By preventing RAM remapping, ioremap_cache() will be used
instead, which will provide a decrypted mapping of the boot related data.
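
A rough sketch of the generic memremap() side of this (simplified; the
exact call site in kernel/memremap.c may differ):

    /* RAM remapping is only attempted when the architecture allows it */
    if (is_ram == REGION_INTERSECTS &&
        arch_memremap_can_ram_remap(offset, size, flags))
            addr = try_ram_remap(offset, size);
    /* otherwise fall back to ioremap_cache(), yielding a decrypted map */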

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/io.h |5 +
 arch/x86/mm/ioremap.c |  179 +
 include/linux/io.h|2 +
 kernel/memremap.c |   20 -
 mm/early_ioremap.c|   18 -
 5 files changed, 217 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 7afb0e2..09c5557 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -381,4 +381,9 @@ extern int __must_check arch_phys_wc_add(unsigned long base,
 #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
 #endif
 
+extern bool arch_memremap_can_ram_remap(resource_size_t offset,
+   unsigned long size,
+   unsigned long flags);
+#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
+
 #endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 4feda83..f3fa007 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include <linux/efi.h>
 
 #include 
 #include 
@@ -22,6 +23,7 @@
 #include 
 #include 
 #include 
+#include <asm/setup.h>
 
 #include "physaddr.h"
 
@@ -422,6 +424,183 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
 }
 
+/*
+ * Examine the physical address to determine if it is an area of memory
+ * that should be mapped decrypted.  If the memory is not part of the
+ * kernel usable area it was accessed and created decrypted, so these
+ * areas should be mapped decrypted.
+ */
+static bool memremap_should_map_decrypted(resource_size_t phys_addr,
+ unsigned long size)
+{
+   /* Check if the address is outside kernel usable area */
+   switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
+   case E820_TYPE_RESERVED:
+   case E820_TYPE_ACPI:
+   case E820_TYPE_NVS:
+   case E820_TYPE_UNUSABLE:
+   return true;
+   default:
+   break;
+   }
+
+   return false;
+}
+
+/*
+ * Examine the physical address to determine if it is EFI data. Check
+ * it against the boot params structure and EFI tables and memory types.
+ */
+static bool memremap_is_efi_data(resource_size_t phys_addr,
+unsigned long size)
+{
+   u64 paddr;
+
+   /* Check if the address is part of EFI boot/runtime data */
+   if (!efi_enabled(EFI_BOOT))
+   return false;
+
+   paddr = boot_params.efi_info.efi_memmap_hi;
+   paddr <<= 32;
+   paddr |= boot_params.efi_info.efi_memmap;
+   if (phys_addr == paddr)
+   return true;
+
+   paddr = boot_params.efi_info.efi_systab_hi;
+   paddr <<= 32;
+   paddr |= boot_params.efi_info.efi_systab;
+   if (phys_addr == paddr)
+   return true;
+
+   if (efi_is_table_address(phys_addr))
+   return true;
+
+   switch (efi_mem_type(phys_addr)) {
+   case EFI_BOOT_SERVICES_DATA:
+   case EFI_RUNTIME_SERVICES_DATA:
+   return true;
+   default:
+   break;
+   }
+
+   return false;
+}
+
+/*
+ * Examine the physical address to determine if it is boot data by checking
+ * it against the boot params setup_data chain.
+ */
+static bool memremap_is_setup_data(resource_size_t phys_addr,
+  unsigned long size)
+{
+   struct setup_data *data;
+   u64 paddr, paddr_next;
+
+   paddr = boot_params.hdr.setup_data;
+   while (paddr) {
+   unsigned int len;
+
+   if (phys_addr == paddr)
+   return true;
+
+   data = memremap(paddr, sizeof(*data),
+   MEMREMAP_WB | MEMREMAP_DEC);
+
+   paddr_next = data->next;
+   len = data->len;
+
+   memunmap(data);
+
+   if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
+   return true;
+
+   paddr = paddr_next;
+   }
+
+   return false;
+}
+
+/*
+ * Examine the physical address to determine if it is boot data by checking
+ * it 

[Xen-devel] [PATCH v7 18/36] x86/efi: Update EFI pagetable creation to work with SME

2017-06-16 Thread Tom Lendacky
When SME is active, pagetable entries created for EFI need to have the
encryption mask set as necessary.

When the new pagetable pages are allocated they are mapped encrypted. So,
update the efi_pgt value that will be used in cr3 to include the encryption
mask so that the PGD table can be read successfully. The pagetable mapping
as well as the kernel are also added to the pagetable mapping as encrypted.
All other EFI mappings are mapped decrypted (tables, etc.).

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/platform/efi/efi_64.c |   15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 9bf72f5..12e8388 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -327,7 +327,7 @@ void efi_sync_low_kernel_mappings(void)
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-   unsigned long pfn, text;
+   unsigned long pfn, text, pf;
struct page *page;
unsigned npages;
pgd_t *pgd;
@@ -335,7 +335,12 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, 
unsigned num_pages)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
 
-   efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+   /*
+* Since the PGD is encrypted, set the encryption mask so that when
+* this value is loaded into cr3 the PGD will be decrypted during
+* the pagetable walk.
+*/
+   efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd);
pgd = efi_pgd;
 
/*
@@ -345,7 +350,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, 
unsigned num_pages)
 * phys_efi_set_virtual_address_map().
 */
pfn = pa_memmap >> PAGE_SHIFT;
-   if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX | 
_PAGE_RW)) {
+   pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
+   if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
return 1;
}
@@ -388,7 +394,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, 
unsigned num_pages)
text = __pa(_text);
pfn = text >> PAGE_SHIFT;
 
-   if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, _PAGE_RW)) {
+   pf = _PAGE_RW | _PAGE_ENC;
+   if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
pr_err("Failed to map kernel text 1:1\n");
return 1;
}


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 17/36] efi: Update efi_mem_type() to return an error rather than 0

2017-06-16 Thread Tom Lendacky
The efi_mem_type() function currently returns a 0, which maps to
EFI_RESERVED_TYPE, if the function is unable to find a memmap entry for
the supplied physical address. Returning EFI_RESERVED_TYPE implies that
a memmap entry exists, when it doesn't.  Instead of returning 0, change
the function to return a negative error value when no memmap entry is
found.
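
Callers must now treat a negative return as "no memmap entry"; a sketch:

    int type = efi_mem_type(phys_addr);

    if (type < 0)
            /* no memmap entry covers phys_addr */ ;
    else if (type == EFI_RESERVED_TYPE)
            /* a genuine reserved-type entry exists */ ;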

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/ia64/kernel/efi.c  |4 ++--
 arch/x86/platform/efi/efi.c |6 +++---
 include/linux/efi.h |2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 1212956..8141600 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -757,14 +757,14 @@ static void __init handle_palo(unsigned long phys_addr)
return 0;
 }
 
-u32
+int
 efi_mem_type (unsigned long phys_addr)
 {
efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
 
if (md)
return md->type;
-   return 0;
+   return -EINVAL;
 }
 
 u64
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index f084d87..6217b23 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -1035,12 +1035,12 @@ void __init efi_enter_virtual_mode(void)
 /*
  * Convenience functions to obtain memory types and attributes
  */
-u32 efi_mem_type(unsigned long phys_addr)
+int efi_mem_type(unsigned long phys_addr)
 {
efi_memory_desc_t *md;
 
if (!efi_enabled(EFI_MEMMAP))
-   return 0;
+   return -ENOTSUPP;
 
for_each_efi_memory_desc(md) {
if ((md->phys_addr <= phys_addr) &&
@@ -1048,7 +1048,7 @@ u32 efi_mem_type(unsigned long phys_addr)
  (phys_addr < (md->phys_addr +
  (md->num_pages << EFI_PAGE_SHIFT))))
return md->type;
}
-   return 0;
+   return -EINVAL;
 }
 
 static int __init arch_parse_efi_cmdline(char *str)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8e24f09..4e47f78 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -985,7 +985,7 @@ static inline void efi_esrt_init(void) { }
 extern int efi_config_parse_tables(void *config_tables, int count, int sz,
   efi_config_table_type_t *arch_tables);
 extern u64 efi_get_iobase (void);
-extern u32 efi_mem_type (unsigned long phys_addr);
+extern int efi_mem_type(unsigned long phys_addr);
 extern u64 efi_mem_attributes (unsigned long phys_addr);
 extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 16/36] efi: Add an EFI table address match function

2017-06-16 Thread Tom Lendacky
Add a function that will determine if a supplied physical address matches
the address of an EFI table.
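
Intended use (a later patch in this series calls it when deciding how to
map boot data; sketch only):

    if (efi_is_table_address(phys_addr))
            /* EFI table written before the kernel ran: map decrypted */ ;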

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 drivers/firmware/efi/efi.c |   33 +
 include/linux/efi.h|7 +++
 2 files changed, 40 insertions(+)

diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index b372aad..983675d 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -55,6 +55,25 @@ struct efi __read_mostly efi = {
 };
 EXPORT_SYMBOL(efi);
 
+static unsigned long *efi_tables[] = {
+   &efi.mps,
+   &efi.acpi,
+   &efi.acpi20,
+   &efi.smbios,
+   &efi.smbios3,
+   &efi.sal_systab,
+   &efi.boot_info,
+   &efi.hcdp,
+   &efi.uga,
+   &efi.uv_systab,
+   &efi.fw_vendor,
+   &efi.runtime,
+   &efi.config_table,
+   &efi.esrt,
+   &efi.properties_table,
+   &efi.mem_attr_table,
+};
+
 static bool disable_runtime;
 static int __init setup_noefi(char *arg)
 {
@@ -854,6 +873,20 @@ int efi_status_to_err(efi_status_t status)
return err;
 }
 
+bool efi_is_table_address(unsigned long phys_addr)
+{
+   unsigned int i;
+
+   if (phys_addr == EFI_INVALID_TABLE_ADDR)
+   return false;
+
+   for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
+   if (*(efi_tables[i]) == phys_addr)
+   return true;
+
+   return false;
+}
+
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
  unsigned long code, void *unused)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8269bcb..8e24f09 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1091,6 +1091,8 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
 }
 extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+extern bool efi_is_table_address(unsigned long phys_addr);
 #else
 static inline bool efi_enabled(int feature)
 {
@@ -1104,6 +1106,11 @@ static inline bool efi_enabled(int feature)
 {
return false;
 }
+
+static inline bool efi_is_table_address(unsigned long phys_addr)
+{
+   return false;
+}
 #endif
 
 extern int efi_status_to_err(efi_status_t status);


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 14/36] x86/mm: Insure that boot memory areas are mapped properly

2017-06-16 Thread Tom Lendacky
The boot data and command line data are present in memory in a decrypted
state and are copied early in the boot process.  The early page fault
support will map these areas as encrypted, so before attempting to copy
them, add decrypted mappings so the data is accessed properly when copied.

For the initrd, encrypt this data in place. Since the future mapping of
the initrd area will be mapped as encrypted the data will be accessed
properly.

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |6 +++
 arch/x86/include/asm/pgtable.h |3 ++
 arch/x86/kernel/head64.c   |   30 +--
 arch/x86/kernel/setup.c|9 +
 arch/x86/mm/kasan_init_64.c|2 +
 arch/x86/mm/mem_encrypt.c  |   70 
 6 files changed, 115 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
index 6508ec9..3ffa5fa 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -26,6 +26,9 @@ void __init sme_early_encrypt(resource_size_t paddr,
 void __init sme_early_decrypt(resource_size_t paddr,
  unsigned long size);
 
+void __init sme_map_bootdata(char *real_mode_data);
+void __init sme_unmap_bootdata(char *real_mode_data);
+
 void __init sme_early_init(void);
 
 void __init sme_enable(void);
@@ -39,6 +42,9 @@ static inline void __init sme_early_encrypt(resource_size_t 
paddr,
 static inline void __init sme_early_decrypt(resource_size_t paddr,
unsigned long size) { }
 
+static inline void __init sme_map_bootdata(char *real_mode_data) { }
+static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
+
 static inline void __init sme_early_init(void) { }
 
 static inline void __init sme_enable(void) { }
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index c6452cb..bbeae4a 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -23,6 +23,9 @@
 #ifndef __ASSEMBLY__
 #include 
 
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
+int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
+
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
 void ptdump_walk_pgd_level_checkwx(void);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5d7363a..9e94ed2 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,7 +34,6 @@
 /*
  * Manage page tables very early on.
  */
-extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
@@ -161,13 +160,13 @@ static void __init reset_early_page_tables(void)
 }
 
 /* Create a new PMD entry */
-int __init early_make_pgtable(unsigned long address)
+int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
 {
unsigned long physaddr = address - __PAGE_OFFSET;
pgdval_t pgd, *pgd_p;
p4dval_t p4d, *p4d_p;
pudval_t pud, *pud_p;
-   pmdval_t pmd, *pmd_p;
+   pmdval_t *pmd_p;
 
/* Invalid address or early pgt is done ?  */
if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
@@ -226,12 +225,21 @@ int __init early_make_pgtable(unsigned long address)
memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + 
_KERNPG_TABLE;
}
-   pmd = (physaddr & PMD_MASK) + early_pmd_flags;
pmd_p[pmd_index(address)] = pmd;
 
return 0;
 }
 
+int __init early_make_pgtable(unsigned long address)
+{
+   unsigned long physaddr = address - __PAGE_OFFSET;
+   pmdval_t pmd;
+
+   pmd = (physaddr & PMD_MASK) + early_pmd_flags;
+
+   return __early_make_pgtable(address, pmd);
+}
+
 /* Don't add a printk in there. printk relies on the PDA which is not 
initialized 
yet. */
 static void __init clear_bss(void)
@@ -254,6 +262,12 @@ static void __init copy_bootdata(char *real_mode_data)
char * command_line;
unsigned long cmd_line_ptr;
 
+   /*
+* If SME is active, this will create decrypted mappings of the
+* boot data in advance of the copy operations.
+*/
+   sme_map_bootdata(real_mode_data);
+
memcpy(&boot_params, real_mode_data, sizeof boot_params);
sanitize_boot_params(&boot_params);
cmd_line_ptr = get_cmd_line_ptr();
@@ -261,6 +275,14 @@ static void __init copy_bootdata(char *real_mode_data)
command_line = __va(cmd_line_ptr);
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
}
+
+   /*
+* The old boot data is no longer needed and won't be reserved,
+* freeing up that memory for use by the system. If SME is 

[Xen-devel] [PATCH v7 15/36] x86/boot/e820: Add support to determine the E820 type of an address

2017-06-16 Thread Tom Lendacky
Add a function that will return the E820 type associated with an address
range.
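
A sketch of the intended use: the return value is an enum e820_type on
success and a negative value when no single entry covers the range, so
a caller can write:

    int type = e820__get_entry_type(start, start + size - 1);

    if (type < 0)
            /* range not covered by a single E820 entry */ ;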

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/e820/api.h |2 ++
 arch/x86/kernel/e820.c  |   26 +++---
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index 8e0f8b8..3641f5f 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -38,6 +38,8 @@
 extern void e820__reallocate_tables(void);
 extern void e820__register_nosave_regions(unsigned long limit_pfn);
 
+extern int  e820__get_entry_type(u64 start, u64 end);
+
 /*
  * Returns true iff the specified range [start,end) is completely contained 
inside
  * the ISA region.
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d78a586..46c9b65 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -84,7 +84,8 @@ bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
  * Note: this function only works correctly once the E820 table is sorted and
  * not-overlapping (at least for the range specified), which is the case 
normally.
  */
-bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
+static struct e820_entry *__e820__mapped_all(u64 start, u64 end,
+enum e820_type type)
 {
int i;
 
@@ -110,9 +111,28 @@ bool __init e820__mapped_all(u64 start, u64 end, enum 
e820_type type)
 * coverage of the desired range exists:
 */
if (start >= end)
-   return 1;
+   return entry;
}
-   return 0;
+
+   return NULL;
+}
+
+/*
+ * This function checks if the entire range <start,end> is mapped with type.
+ */
+bool __init e820__mapped_all(u64 start, u64 end, enum e820_type type)
+{
+   return __e820__mapped_all(start, end, type);
+}
+
+/*
+ * This function returns the type associated with the range <start,end>.
+ */
+int e820__get_entry_type(u64 start, u64 end)
+{
+   struct e820_entry *entry = __e820__mapped_all(start, end, 0);
+
+   return entry ? entry->type : -EINVAL;
 }
 
 /*


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 12/36] x86/mm: Extend early_memremap() support with additional attrs

2017-06-16 Thread Tom Lendacky
Add early_memremap() support to be able to specify encrypted and
decrypted mappings with and without write-protection. The use of
write-protection is necessary when encrypting data "in place". The
write-protect attribute is considered cacheable for loads, but not
stores. This implies that the hardware will never give the core a
dirty line with this memtype.
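
The write-protect variants are what make in-place encryption safe: the
source view of a page can be read through the cache but never dirtied.
A sketch of the pairing, as used by a later patch in this series:

    /* read-only (cache-WP) view of the current contents ... */
    src = early_memremap_decrypted_wp(paddr, len);
    /* ... and a writable view of the desired (encrypted) state */
    dst = early_memremap_encrypted(paddr, len);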

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/Kconfig |4 +++
 arch/x86/include/asm/fixmap.h|   13 ++
 arch/x86/include/asm/pgtable_types.h |8 ++
 arch/x86/mm/ioremap.c|   44 ++
 include/asm-generic/early_ioremap.h  |2 ++
 mm/early_ioremap.c   |   10 
 6 files changed, 81 insertions(+)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cf74791..0b09b88 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1433,6 +1433,10 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
  If set to N, then the encryption of system memory can be
  activated with the mem_encrypt=on command line option.
 
+config ARCH_USE_MEMREMAP_PROT
+   def_bool y
+   depends on AMD_MEM_ENCRYPT
+
 # Common NUMA Features
 config NUMA
bool "Numa Memory Allocation and Scheduler Support"
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index d9ff226..dcd9fb5 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -164,6 +164,19 @@ static inline void __set_fixmap(enum fixed_addresses idx,
  */
 #define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
 
+/*
+ * Early memremap routines used for in-place encryption. The mappings created
+ * by these routines are intended to be used as temporary mappings.
+ */
+void __init *early_memremap_encrypted(resource_size_t phys_addr,
+ unsigned long size);
+void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
+unsigned long size);
+void __init *early_memremap_decrypted(resource_size_t phys_addr,
+ unsigned long size);
+void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
+unsigned long size);
+
 #include 
 
 #define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
diff --git a/arch/x86/include/asm/pgtable_types.h 
b/arch/x86/include/asm/pgtable_types.h
index de32ca3..32095af 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -161,6 +161,7 @@ enum page_cache_mode {
 
 #define _PAGE_CACHE_MASK   (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
 #define _PAGE_NOCACHE  (cachemode2protval(_PAGE_CACHE_MODE_UC))
+#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
 
 #define PAGE_NONE  __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
@@ -189,6 +190,7 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC   (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+#define __PAGE_KERNEL_WP   (__PAGE_KERNEL | _PAGE_CACHE_WP)
 
 #define __PAGE_KERNEL_IO   (__PAGE_KERNEL)
 #define __PAGE_KERNEL_IO_NOCACHE   (__PAGE_KERNEL_NOCACHE)
@@ -202,6 +204,12 @@ enum page_cache_mode {
 #define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |\
 _PAGE_DIRTY | _PAGE_ENC)
 
+#define __PAGE_KERNEL_ENC  (__PAGE_KERNEL | _PAGE_ENC)
+#define __PAGE_KERNEL_ENC_WP   (__PAGE_KERNEL_WP | _PAGE_ENC)
+
+#define __PAGE_KERNEL_NOENC(__PAGE_KERNEL)
+#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP)
+
#define PAGE_KERNEL	__pgprot(__PAGE_KERNEL | _PAGE_ENC)
 #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
 #define PAGE_KERNEL_EXEC   __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a382ba9..4feda83 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -422,6 +422,50 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
 }
 
+#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
+/* Remap memory with encryption */
+void __init *early_memremap_encrypted(resource_size_t phys_addr,
+ unsigned long size)
+{
+   return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
+}
+
+/*
+ * Remap memory with encryption and write-protected - cannot be called
+ * before pat_init() is called
+ */
+void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
+unsigned long size)
+{
+   /* Be sure the write-protect PAT entry is set for write-protect */
+   if 

[Xen-devel] [PATCH v7 13/36] x86/mm: Add support for early encrypt/decrypt of memory

2017-06-16 Thread Tom Lendacky
Add support to be able to either encrypt or decrypt data in place during
the early stages of booting the kernel. This does not change the memory
encryption attribute - it is used for ensuring that data present in either
an encrypted or decrypted memory area is in the proper state (for example
the initrd will have been loaded by the boot loader and will not be
encrypted, but the memory that it resides in is marked as encrypted).
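
An illustrative call (the variable names here are hypothetical): bringing
a boot-loader-provided initrd into the encrypted state its mapping expects:

    sme_early_encrypt(initrd_paddr, initrd_size);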

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |   10 +
 arch/x86/mm/mem_encrypt.c  |   76 
 2 files changed, 86 insertions(+)

diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
index faae4e1..6508ec9 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,6 +21,11 @@
 
 extern unsigned long sme_me_mask;
 
+void __init sme_early_encrypt(resource_size_t paddr,
+ unsigned long size);
+void __init sme_early_decrypt(resource_size_t paddr,
+ unsigned long size);
+
 void __init sme_early_init(void);
 
 void __init sme_enable(void);
@@ -29,6 +34,11 @@
 
#define sme_me_mask	0UL
 
+static inline void __init sme_early_encrypt(resource_size_t paddr,
+   unsigned long size) { }
+static inline void __init sme_early_decrypt(resource_size_t paddr,
+   unsigned long size) { }
+
 static inline void __init sme_early_init(void) { }
 
 static inline void __init sme_enable(void) { }
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b2d1cdf..b7671b9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -17,6 +17,9 @@
 
 #include 
 
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
 /*
  * Since SME related variables are set early in the boot process they must
  * reside in the .data section so as not to be zeroed out when the .bss
@@ -25,6 +28,79 @@
 unsigned long sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
+/* Buffer used for early in-place encryption by BSP, no locking needed */
+static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+/*
+ * This routine does not change the underlying encryption setting of the
+ * page(s) that map this memory. It assumes that eventually the memory is
+ * meant to be accessed as either encrypted or decrypted but the contents
+ * are currently not in the desired state.
+ *
+ * This routine follows the steps outlined in the AMD64 Architecture
+ * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
+ */
+static void __init __sme_early_enc_dec(resource_size_t paddr,
+  unsigned long size, bool enc)
+{
+   void *src, *dst;
+   size_t len;
+
+   if (!sme_me_mask)
+   return;
+
+   local_flush_tlb();
+   wbinvd();
+
+   /*
+* There are limited number of early mapping slots, so map (at most)
+* one page at time.
+*/
+   while (size) {
+   len = min_t(size_t, sizeof(sme_early_buffer), size);
+
+   /*
+* Create mappings for the current and desired format of
+* the memory. Use a write-protected mapping for the source.
+*/
+   src = enc ? early_memremap_decrypted_wp(paddr, len) :
+   early_memremap_encrypted_wp(paddr, len);
+
+   dst = enc ? early_memremap_encrypted(paddr, len) :
+   early_memremap_decrypted(paddr, len);
+
+   /*
+* If a mapping can't be obtained to perform the operation,
+* then eventual access of that area in the desired mode
+* will cause a crash.
+*/
+   BUG_ON(!src || !dst);
+
+   /*
+* Use a temporary buffer, of cache-line multiple size, to
+* avoid data corruption as documented in the APM.
+*/
+   memcpy(sme_early_buffer, src, len);
+   memcpy(dst, sme_early_buffer, len);
+
+   early_memunmap(dst, len);
+   early_memunmap(src, len);
+
+   paddr += len;
+   size -= len;
+   }
+}
+
+void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
+{
+   __sme_early_enc_dec(paddr, size, true);
+}
+
+void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
+{
+   __sme_early_enc_dec(paddr, size, false);
+}
+
 void __init sme_early_init(void)
 {
unsigned int i;


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 11/36] x86/mm: Add SME support for read_cr3_pa()

2017-06-16 Thread Tom Lendacky
The cr3 register entry can contain the SME encryption mask that indicates
the PGD is encrypted.  The encryption mask should not be used when
creating a virtual address from the cr3 register, so remove the SME
encryption mask in the read_cr3_pa() function.

During early boot SME will need to use a native version of read_cr3_pa(),
so create native_read_cr3_pa().
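
For example, recovering the PGD virtual address from cr3 now works
regardless of whether the SME mask is set in the register (sketch):

    pgd_t *pgd = (pgd_t *)__va(read_cr3_pa());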

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/processor-flags.h |3 ++-
 arch/x86/include/asm/processor.h   |5 +
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/processor-flags.h 
b/arch/x86/include/asm/processor-flags.h
index 79aa2f9..cb6999c 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_PROCESSOR_FLAGS_H
 
 #include 
+#include <linux/mem_encrypt.h>
 
 #ifdef CONFIG_VM86
#define X86_VM_MASK	X86_EFLAGS_VM
@@ -33,7 +34,7 @@
  */
 #ifdef CONFIG_X86_64
 /* Mask off the address space ID bits. */
-#define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull
+#define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull)
 #define CR3_PCID_MASK 0xFFFull
 #else
 /*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 8010c97..ab878bd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -240,6 +240,11 @@ static inline unsigned long read_cr3_pa(void)
return __read_cr3() & CR3_ADDR_MASK;
 }
 
+static inline unsigned long native_read_cr3_pa(void)
+{
+   return __native_read_cr3() & CR3_ADDR_MASK;
+}
+
 static inline void load_cr3(pgd_t *pgdir)
 {
write_cr3(__sme_pa(pgdir));


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 08/36] x86/mm: Add support to enable SME in early boot processing

2017-06-16 Thread Tom Lendacky
Add support to the early boot code to use Secure Memory Encryption (SME).
Since the kernel has been loaded into memory in a decrypted state, encrypt
the kernel in place and update the early pagetables with the memory
encryption mask so that new pagetable entries will use memory encryption.

The routines to set the encryption mask and perform the encryption are
stub routines for now with functionality to be added in a later patch.

Because of the need to have the routines available to head_64.S, the
mem_encrypt.c is always built and #ifdefs in mem_encrypt.c will provide
functionality or stub routines depending on CONFIG_AMD_MEM_ENCRYPT.
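
The always-built accessor is trivial; a sketch of its shape (the stub
variants reduce to the same result when SME is compiled out):

    unsigned long sme_get_me_mask(void)
    {
            return sme_me_mask;  /* 0UL unless SME is active */
    }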

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/mem_encrypt.h |8 +++
 arch/x86/kernel/head64.c   |   33 +-
 arch/x86/kernel/head_64.S  |   39 ++--
 arch/x86/mm/Makefile   |4 +---
 arch/x86/mm/mem_encrypt.c  |   24 ++
 5 files changed, 93 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
index a105796..988b336 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -15,16 +15,24 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/init.h>
+
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 
 extern unsigned long sme_me_mask;
 
+void __init sme_enable(void);
+
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
#define sme_me_mask	0UL
 
+static inline void __init sme_enable(void) { }
+
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
+unsigned long sme_get_me_mask(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2b2ac38..95979c3 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include <linux/mem_encrypt.h>
 
 #include 
 #include 
@@ -46,6 +47,7 @@ static void __init *fixup_pointer(void *ptr, unsigned long 
physaddr)
 void __init __startup_64(unsigned long physaddr)
 {
unsigned long load_delta, *p;
+   unsigned long pgtable_flags;
pgdval_t *pgd;
p4dval_t *p4d;
pudval_t *pud;
@@ -66,6 +68,12 @@ void __init __startup_64(unsigned long physaddr)
if (load_delta & ~PMD_PAGE_MASK)
for (;;);
 
+   /* Activate Secure Memory Encryption (SME) if supported and enabled */
+   sme_enable();
+
+   /* Include the SME encryption mask in the fixup value */
+   load_delta += sme_get_me_mask();
+
/* Fixup the physical addresses in the page table */
 
pgd = fixup_pointer(&early_top_pgt, physaddr);
@@ -92,28 +100,30 @@ void __init __startup_64(unsigned long physaddr)
 
pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+   pgtable_flags = _KERNPG_TABLE + sme_get_me_mask();
 
if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], 
physaddr);
 
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
-   pgd[i + 0] = (pgdval_t)p4d + _KERNPG_TABLE;
-   pgd[i + 1] = (pgdval_t)p4d + _KERNPG_TABLE;
+   pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
+   pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
 
i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
-   p4d[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
-   p4d[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+   p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
+   p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
} else {
i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
-   pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
-   pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+   pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
+   pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
}
 
i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
-   pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
-   pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
+   pud[i + 0] = (pudval_t)pmd + pgtable_flags;
+   pud[i + 1] = (pudval_t)pmd + pgtable_flags;
 
pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+   pmd_entry += sme_get_me_mask();
pmd_entry +=  physaddr;
 
for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
@@ -134,9 +144,12 @@ void __init __startup_64(unsigned long physaddr)
pmd[i] += load_delta;
}
 
-   /* Fixup phys_base */
+   /*
+* Fixup phys_base - remove the memory encryption mask to obtain
+* the true physical address.
+*/
p = fixup_pointer(&phys_base, physaddr);
-   *p += load_delta;
+   *p += load_delta - sme_get_me_mask();
 }
 
 /* Wipe all early page tables except for the kernel symbol map */
diff 

[Xen-devel] [PATCH v7 06/36] x86/mm: Add Secure Memory Encryption (SME) support

2017-06-16 Thread Tom Lendacky
Add support for Secure Memory Encryption (SME). This initial support
provides a Kconfig entry to build the SME support into the kernel and
defines the memory encryption mask that will be used in subsequent
patches to mark pages as encrypted.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/Kconfig   |   26 ++
 arch/x86/include/asm/mem_encrypt.h |   30 ++
 arch/x86/mm/Makefile   |1 +
 arch/x86/mm/mem_encrypt.c  |   21 +
 include/linux/mem_encrypt.h|   35 +++
 5 files changed, 113 insertions(+)
 create mode 100644 arch/x86/include/asm/mem_encrypt.h
 create mode 100644 arch/x86/mm/mem_encrypt.c
 create mode 100644 include/linux/mem_encrypt.h

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7c991d0..cf74791 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1407,6 +1407,32 @@ config X86_DIRECT_GBPAGES
  supports them), so don't confuse the user by printing
  that we have them enabled.
 
+config ARCH_HAS_MEM_ENCRYPT
+   def_bool y
+   depends on X86
+
+config AMD_MEM_ENCRYPT
+   bool "AMD Secure Memory Encryption (SME) support"
+   depends on X86_64 && CPU_SUP_AMD
+   ---help---
+ Say yes to enable support for the encryption of system memory.
+ This requires an AMD processor that supports Secure Memory
+ Encryption (SME).
+
+config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
+   bool "Activate AMD Secure Memory Encryption (SME) by default"
+   default y
+   depends on AMD_MEM_ENCRYPT
+   ---help---
+ Say yes to have system memory encrypted by default if running on
+ an AMD processor that supports Secure Memory Encryption (SME).
+
+ If set to Y, then the encryption of system memory can be
+ deactivated with the mem_encrypt=off command line option.
+
+ If set to N, then the encryption of system memory can be
+ activated with the mem_encrypt=on command line option.
+
 # Common NUMA Features
 config NUMA
bool "Numa Memory Allocation and Scheduler Support"
diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
new file mode 100644
index 000..a105796
--- /dev/null
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -0,0 +1,30 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __X86_MEM_ENCRYPT_H__
+#define __X86_MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+
+extern unsigned long sme_me_mask;
+
+#else  /* !CONFIG_AMD_MEM_ENCRYPT */
+
#define sme_me_mask	0UL
+
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 0fbdcb6..a94a7b6 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_X86_INTEL_MPX)   += mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
 obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
 
+obj-$(CONFIG_AMD_MEM_ENCRYPT)  += mem_encrypt.o
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
new file mode 100644
index 000..b99d469
--- /dev/null
+++ b/arch/x86/mm/mem_encrypt.c
@@ -0,0 +1,21 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include 
+
+/*
+ * Since SME related variables are set early in the boot process they must
+ * reside in the .data section so as not to be zeroed out when the .bss
+ * section is later cleared.
+ */
+unsigned long sme_me_mask __section(.data) = 0;
+EXPORT_SYMBOL_GPL(sme_me_mask);
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
new file mode 100644
index 000..59769f7
--- /dev/null
+++ b/include/linux/mem_encrypt.h
@@ -0,0 +1,35 @@
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MEM_ENCRYPT_H__
+#define __MEM_ENCRYPT_H__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
+
+#include <asm/mem_encrypt.h>
+
+#else  /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+#define sme_me_mask 

[Xen-devel] [PATCH v7 10/36] x86/mm: Provide general kernel support for memory encryption

2017-06-16 Thread Tom Lendacky
Changes to the existing page table macros will allow the SME support to
be enabled in a simple fashion with minimal changes to files that use these
macros.  Since the memory encryption mask will now be part of the regular
pagetable macros, we introduce two new macros (_PAGE_TABLE_NOENC and
_KERNPG_TABLE_NOENC) to allow for early pagetable creation/initialization
without the encryption mask before SME becomes active.  Two new pgprot()
macros are defined to allow setting or clearing the page encryption mask.

The FIXMAP_PAGE_NOCACHE define is introduced for use with MMIO.  SME does
not support encryption for MMIO areas so this define removes the encryption
mask from the page attribute.

Two new macros are introduced (__sme_pa() / __sme_pa_nodebug()) to allow
creating a physical address with the encryption mask.  These are used when
working with the cr3 register so that the PGD can be encrypted. The current
__va() macro is updated so that the virtual address is generated based off
of the physical address without the encryption mask thus allowing the same
virtual address to be generated regardless of whether encryption is enabled
for that physical location or not.
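
A minimal illustration of the cr3 pairing described above (the macro body
is from this patch; the call site is a sketch):

    #define __sme_pa(x)  (__pa(x) | sme_me_mask)

    write_cr3(__sme_pa(pgdir));  /* PGD walked as encrypted when SME is on */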

Also, an early initialization function is added for SME.  If SME is active,
this function:
 - Updates the early_pmd_flags so that early page faults create mappings
   with the encryption mask.
 - Updates the __supported_pte_mask to include the encryption mask.
 - Updates the protection_map entries to include the encryption mask so
   that user-space allocations will automatically have the encryption mask
   applied.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/boot/compressed/pagetable.c |7 +
 arch/x86/include/asm/fixmap.h|7 +
 arch/x86/include/asm/mem_encrypt.h   |   13 ++
 arch/x86/include/asm/page_types.h|3 ++
 arch/x86/include/asm/pgtable.h   |9 +++
 arch/x86/include/asm/pgtable_types.h |   45 ++
 arch/x86/include/asm/processor.h |3 ++
 arch/x86/kernel/espfix_64.c  |2 +-
 arch/x86/kernel/head64.c |   11 +++-
 arch/x86/kernel/head_64.S|   20 ---
 arch/x86/mm/kasan_init_64.c  |4 ++-
 arch/x86/mm/mem_encrypt.c|   18 ++
 arch/x86/mm/pageattr.c   |3 ++
 include/asm-generic/pgtable.h|8 ++
 include/linux/mem_encrypt.h  |8 ++
 15 files changed, 128 insertions(+), 33 deletions(-)

diff --git a/arch/x86/boot/compressed/pagetable.c 
b/arch/x86/boot/compressed/pagetable.c
index 8e69df9..246bf29 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -15,6 +15,13 @@
 #define __pa(x)  ((unsigned long)(x))
 #define __va(x)  ((void *)((unsigned long)(x)))
 
+/*
+ * The pgtable.h and mm/ident_map.c includes make use of the SME related
+ * information which is not used in the compressed image support. Un-define
+ * the SME support to avoid any compile and link errors.
+ */
+#undef CONFIG_AMD_MEM_ENCRYPT
+
 #include "misc.h"
 
 /* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b65155c..d9ff226 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -157,6 +157,13 @@ static inline void __set_fixmap(enum fixed_addresses idx,
 }
 #endif
 
+/*
+ * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
+ * supported for MMIO addresses, so make sure that the memory encryption
+ * mask is not part of the page attributes.
+ */
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
+
 #include 
 
 #define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
diff --git a/arch/x86/include/asm/mem_encrypt.h 
b/arch/x86/include/asm/mem_encrypt.h
index 988b336..faae4e1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -21,18 +21,31 @@
 
 extern unsigned long sme_me_mask;
 
+void __init sme_early_init(void);
+
 void __init sme_enable(void);
 
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
#define sme_me_mask	0UL
 
+static inline void __init sme_early_init(void) { }
+
 static inline void __init sme_enable(void) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
 unsigned long sme_get_me_mask(void);
 
+/*
+ * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
+ * writing to or comparing values from the cr3 register.  Having the
+ * encryption mask set in cr3 enables the PGD entry to be encrypted and
+ * avoid special case handling of PGD allocations.
+ */
+#define __sme_pa(x)		(__pa(x) | sme_me_mask)
+#define __sme_pa_nodebug(x)	(__pa_nodebug(x) | sme_me_mask)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/page_types.h 
b/arch/x86/include/asm/page_types.h
index 7bd0099..b98ed9d 100644
--- 

[Xen-devel] [PATCH v7 09/36] x86/mm: Simplify p[gum]d_page() macros

2017-06-16 Thread Tom Lendacky
Create a pgd_pfn() macro similar to the p[um]d_pfn() macros and then
use the p[gum]d_pfn() macros in the p[gum]d_page() macros instead of
duplicating the code.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/pgtable.h |   16 +---
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 77037b6..b64ea52 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -195,6 +195,11 @@ static inline unsigned long p4d_pfn(p4d_t p4d)
return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+   return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 static inline int p4d_large(p4d_t p4d)
 {
/* No 512 GiB pages yet */
@@ -704,8 +709,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)  \
-   pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd)  pfn_to_page(pmd_pfn(pmd))
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -773,8 +777,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pud_page(pud)  \
-   pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
+#define pud_page(pud)  pfn_to_page(pud_pfn(pud))
 
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -824,8 +827,7 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define p4d_page(p4d)  \
-   pfn_to_page((p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT)
+#define p4d_page(p4d)  pfn_to_page(p4d_pfn(p4d))
 
 /* Find an entry in the third-level page table.. */
 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
@@ -859,7 +861,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pgd_page(pgd)  pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd)  pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
 static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v7 07/36] x86/mm: Don't use phys_to_virt in ioremap() if SME is active

2017-06-16 Thread Tom Lendacky
Currently there is a check if the address being mapped is in the ISA
range (is_ISA_range()), and if it is then phys_to_virt() is used to
perform the mapping.  When SME is active, however, this will result
in the mapping having the encryption bit set when it is expected that
an ioremap() should not have the encryption bit set. So only use the
phys_to_virt() function if SME is not active.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/mm/ioremap.c |7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 4c1b5fd..a382ba9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -106,9 +107,11 @@ static void __iomem *__ioremap_caller(resource_size_t 
phys_addr,
}
 
/*
-* Don't remap the low PCI/ISA area, it's always mapped..
+* Don't remap the low PCI/ISA area, it's always mapped.
+*   But if SME is active, skip this so that the encryption bit
+*   doesn't get set.
 */
-   if (is_ISA_range(phys_addr, last_addr))
+   if (is_ISA_range(phys_addr, last_addr) && !sme_active())
return (__force void __iomem *)phys_to_virt(phys_addr);
 
/*




[Xen-devel] [PATCH v7 05/36] x86/CPU/AMD: Handle SME reduction in physical address size

2017-06-16 Thread Tom Lendacky
When System Memory Encryption (SME) is enabled, the physical address
space is reduced. Adjust the x86_phys_bits value to reflect this
reduction.
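
For reference, the adjustment below derives the reduction straight from CPUID 0x8000001f; a minimal sketch of the same calculation (the helper name is made up):

static unsigned int sme_phys_bits_reduction(void)
{
        /* CPUID 0x8000001f, EBX bits 11:6: number of physical address
         * bits lost when memory encryption is enabled. */
        return (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
}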

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/kernel/cpu/amd.c |   10 +++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c47ceee..5bdcbd4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -613,15 +613,19 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_E400);
 
/*
-* BIOS support is required for SME. If BIOS has not enabled SME
-* then don't advertise the feature (set in scattered.c)
+* BIOS support is required for SME. If BIOS has enabled SME then
+* adjust x86_phys_bits by the SME physical address space reduction
+* value. If BIOS has not enabled SME then don't advertise the
+* feature (set in scattered.c).
 */
if (cpu_has(c, X86_FEATURE_SME)) {
u64 msr;
 
/* Check if SME is enabled */
rdmsrl(MSR_K8_SYSCFG, msr);
-   if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+   if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT)
+   c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+   else
clear_cpu_cap(c, X86_FEATURE_SME);
}
 }




[Xen-devel] [PATCH v7 03/36] x86, mpparse, x86/acpi, x86/PCI, x86/dmi, SFI: Use memremap for RAM mappings

2017-06-16 Thread Tom Lendacky
The ioremap() function is intended for mapping MMIO. For RAM, the
memremap() function should be used. Convert calls from ioremap() to
memremap() when re-mapping RAM.

This will be used later by SME to control how the encryption mask is
applied to memory mappings, with certain memory locations being mapped
decrypted vs encrypted.
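
The conversion follows one pattern throughout; a minimal usage sketch (read_ram_region() is a hypothetical caller):

static int read_ram_region(resource_size_t pa, size_t len)
{
        void *p;

        p = memremap(pa, len, MEMREMAP_WB);     /* RAM: was ioremap_cache() */
        if (!p)
                return -ENOMEM;
        /* ... access the memory through p ... */
        memunmap(p);                            /* was iounmap() */
        return 0;
}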

Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/dmi.h   |8 
 arch/x86/kernel/acpi/boot.c  |6 +++---
 arch/x86/kernel/kdebugfs.c   |   34 +++---
 arch/x86/kernel/ksysfs.c |   28 ++--
 arch/x86/kernel/mpparse.c|   10 +-
 arch/x86/pci/common.c|4 ++--
 drivers/firmware/dmi-sysfs.c |5 +++--
 drivers/firmware/pcdp.c  |4 ++--
 drivers/sfi/sfi_core.c   |   22 +++---
 9 files changed, 55 insertions(+), 66 deletions(-)

diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index 3c69fed..a8e15b0 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -13,9 +13,9 @@ static __always_inline __init void *dmi_alloc(unsigned len)
 }
 
 /* Use early IO mappings for DMI because it's initialized early */
-#define dmi_early_remap		early_ioremap
-#define dmi_early_unmap		early_iounmap
-#define dmi_remap		ioremap_cache
-#define dmi_unmap		iounmap
+#define dmi_early_remap		early_memremap
+#define dmi_early_unmap		early_memunmap
+#define dmi_remap(_x, _l)	memremap(_x, _l, MEMREMAP_WB)
+#define dmi_unmap(_x)		memunmap(_x)
 
 #endif /* _ASM_X86_DMI_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6bb6806..850160a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -115,7 +115,7 @@
 #define	ACPI_INVALID_GSI	INT_MIN
 
 /*
- * This is just a simple wrapper around early_ioremap(),
+ * This is just a simple wrapper around early_memremap(),
  * with sanity checks for phys == 0 and size == 0.
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
@@ -124,7 +124,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
if (!phys || !size)
return NULL;
 
-   return early_ioremap(phys, size);
+   return early_memremap(phys, size);
 }
 
 void __init __acpi_unmap_table(char *map, unsigned long size)
@@ -132,7 +132,7 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
if (!map || !size)
return;
 
-   early_iounmap(map, size);
+   early_memunmap(map, size);
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 38b6458..fd6f8fb 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -33,7 +33,6 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
struct setup_data_node *node = file->private_data;
unsigned long remain;
loff_t pos = *ppos;
-   struct page *pg;
void *p;
u64 pa;
 
@@ -47,18 +46,13 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
count = node->len - pos;
 
pa = node->paddr + sizeof(struct setup_data) + pos;
-   pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
-   if (PageHighMem(pg)) {
-   p = ioremap_cache(pa, count);
-   if (!p)
-   return -ENXIO;
-   } else
-   p = __va(pa);
+   p = memremap(pa, count, MEMREMAP_WB);
+   if (!p)
+   return -ENOMEM;
 
remain = copy_to_user(user_buf, p, count);
 
-   if (PageHighMem(pg))
-   iounmap(p);
+   memunmap(p);
 
if (remain)
return -EFAULT;
@@ -109,7 +103,6 @@ static int __init create_setup_data_nodes(struct dentry *parent)
struct setup_data *data;
int error;
struct dentry *d;
-   struct page *pg;
u64 pa_data;
int no = 0;
 
@@ -126,16 +119,12 @@ static int __init create_setup_data_nodes(struct dentry *parent)
goto err_dir;
}
 
-   pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
-   if (PageHighMem(pg)) {
-   data = ioremap_cache(pa_data, sizeof(*data));
-   if (!data) {
-   kfree(node);
-   error = -ENXIO;
-   goto err_dir;
-   }
-   } else
-   data = __va(pa_data);
+   data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
+   if (!data) {
+   kfree(node);
+   error = -ENOMEM;
+   goto err_dir;
+   }
 
node->paddr = pa_data;
node->type = 

[Xen-devel] [PATCH v7 04/36] x86/CPU/AMD: Add the Secure Memory Encryption CPU feature

2017-06-16 Thread Tom Lendacky
Update the CPU features to include identifying and reporting on the
Secure Memory Encryption (SME) feature.  SME is identified by CPUID
0x8000001f, but requires BIOS support to enable it (set bit 23 of
MSR_K8_SYSCFG).  Only show the SME feature as available if reported by
CPUID and enabled by BIOS.
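
Putting the two conditions together, roughly (sme_bios_enabled() is a hypothetical helper; the constants are the ones added below):

static bool sme_bios_enabled(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (!cpu_has(c, X86_FEATURE_SME))       /* CPUID 0x8000001f[EAX] bit 0 */
                return false;
        rdmsrl(MSR_K8_SYSCFG, msr);             /* BIOS must have set bit 23 */
        return !!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT);
}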

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/include/asm/cpufeatures.h |1 +
 arch/x86/include/asm/msr-index.h   |2 ++
 arch/x86/kernel/cpu/amd.c  |   13 +
 arch/x86/kernel/cpu/scattered.c|1 +
 4 files changed, 17 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2701e5f..2b692df 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -196,6 +196,7 @@
 
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_SME		( 7*32+10) /* AMD Secure Memory Encryption */
 
 #define X86_FEATURE_INTEL_PPIN	( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 18b1623..460ac01 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -352,6 +352,8 @@
 #define MSR_K8_TOP_MEM1			0xc001001a
 #define MSR_K8_TOP_MEM2			0xc001001d
 #define MSR_K8_SYSCFG  0xc0010010
+#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT  23
+#define MSR_K8_SYSCFG_MEM_ENCRYPT  BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
 #define MSR_K8_INT_PENDING_MSG 0xc0010055
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK	0x18000000
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bb5abe8..c47ceee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -611,6 +611,19 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 */
if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_E400);
+
+   /*
+* BIOS support is required for SME. If BIOS has not enabled SME
+* then don't advertise the feature (set in scattered.c)
+*/
+   if (cpu_has(c, X86_FEATURE_SME)) {
+   u64 msr;
+
+   /* Check if SME is enabled */
+   rdmsrl(MSR_K8_SYSCFG, msr);
+   if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+   clear_cpu_cap(c, X86_FEATURE_SME);
+   }
 }
 
 static void init_amd_k8(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 23c2350..05459ad 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@ struct cpuid_bit {
	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
+	{ X86_FEATURE_SME,		CPUID_EAX,  0, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
 };
 




[Xen-devel] [PATCH v7 02/36] x86/mm/pat: Set write-protect cache mode for full PAT support

2017-06-16 Thread Tom Lendacky
For processors that support PAT, set the write-protect cache mode
(_PAGE_CACHE_MODE_WP) entry to the actual write-protect value (0x05).
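
The slot number follows from the PAT/PCD/PWT bits of a page table entry; an illustrative sketch (pat_slot() is not part of the patch):

static inline unsigned int pat_slot(unsigned int pat, unsigned int pcd,
                                    unsigned int pwt)
{
        return (pat << 2) | (pcd << 1) | pwt;   /* PAT=1,PCD=0,PWT=1 -> 5 (WP) */
}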

Acked-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 arch/x86/mm/pat.c |6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9b78685..6753d9c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -295,7 +295,7 @@ static void init_cache_modes(void)
  * pat_init - Initialize PAT MSR and PAT table
  *
  * This function initializes PAT MSR and PAT table with an OS-defined value
- * to enable additional cache attributes, WC and WT.
+ * to enable additional cache attributes, WC, WT and WP.
  *
  * This function must be called on all CPUs using the specific sequence of
  * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
@@ -356,7 +356,7 @@ void pat_init(void)
 *  010  2  UC-: _PAGE_CACHE_MODE_UC_MINUS
 *  011  3  UC : _PAGE_CACHE_MODE_UC
 *  100  4  WB : Reserved
-*  101  5  WC : Reserved
+*  101  5  WP : _PAGE_CACHE_MODE_WP
 *  110  6  UC-: Reserved
 *  111  7  WT : _PAGE_CACHE_MODE_WT
 *
@@ -364,7 +364,7 @@ void pat_init(void)
 * corresponding types in the presence of PAT errata.
 */
pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
- PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
+ PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
}
 
if (!boot_cpu_done) {




[Xen-devel] [PATCH v7 00/36] x86: Secure Memory Encryption (AMD)

2017-06-16 Thread Tom Lendacky
This patch series provides support for AMD's new Secure Memory Encryption (SME)
feature.

SME can be used to mark individual pages of memory as encrypted through the
page tables. A page of memory that is marked encrypted will be automatically
decrypted when read from DRAM and will be automatically encrypted when
written to DRAM. Details on SME can found in the links below.

The SME feature is identified through a CPUID function and enabled through
the SYSCFG MSR. Once enabled, page table entries will determine how the
memory is accessed. If a page table entry has the memory encryption mask set,
then that memory will be accessed as encrypted memory. The memory encryption
mask (as well as other related information) is determined from settings
returned through the same CPUID function that identifies the presence of the
feature.

The approach that this patch series takes is to encrypt everything possible
starting early in the boot where the kernel is encrypted. Using the page
table macros the encryption mask can be incorporated into all page table
entries and page allocations. By updating the protection map, userspace
allocations are also marked encrypted. Certain data must be accounted for
as having been placed in memory before SME was enabled (EFI, initrd, etc.)
and accessed accordingly.
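
Conceptually, the encryption mask just becomes one more protection bit; a sketch, assuming sme_me_mask is the mask introduced later in this series (mk_encrypted_pte() is a hypothetical helper):

/* Build a kernel PTE with the SME encryption mask folded in. */
static pte_t mk_encrypted_pte(unsigned long pfn)
{
        return pfn_pte(pfn, __pgprot(pgprot_val(PAGE_KERNEL) | sme_me_mask));
}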

This patch series is a pre-cursor to another AMD processor feature called
Secure Encrypted Virtualization (SEV). The support for SEV will build upon
the SME support and will be submitted later. Details on SEV can be found
in the links below.

The following links provide additional detail:

AMD Memory Encryption whitepaper:
   http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf

AMD64 Architecture Programmer's Manual:
   http://support.amd.com/TechDocs/24593.pdf
   SME is section 7.10
   SEV is section 15.34

---

This patch series is based off of the master branch of tip:
  https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git master

  Commit 3d55328fd1f8 ("Merge branch 'WIP.x86/fpu'")

Source code is also available at https://github.com/codomania/tip/tree/sme-v7


Still to do:
- Kdump support, including using memremap() instead of ioremap_cache()

Changes since v6:
- Fixed the asm include file issue that caused build errors on other archs
- Rebased the CR3 register changes on top of Andy Lutomirski's patch
- Added a patch to clear the SME cpu feature if running as a PV guest under
  Xen
- Added a patch to obtain the AMD microcode level earlier in the boot
  instead of directly reading the MSR
- Refactor patch #8 ("x86/mm: Add support to enable SME in early boot
  processing") because the 5-level paging support moved the code into the
  new C-function __startup_64()
- Removed need to decrypt trampoline area in-place (set memory attributes
  before copying the trampoline code)
- General code cleanup based on feedback

Changes since v5:
- Added support for 5-level paging
- Added IOMMU support
- Created a generic asm/mem_encrypt.h in order to remove a bunch of
  #ifndef/#define entries
- Removed changes to the __va() macro and defined a function to return
  the true physical address in cr3
- Removed sysfs support as it was determined not to be needed
- General code cleanup based on feedback
- General cleanup of patch subjects and descriptions

Changes since v4:
- Re-worked mapping of setup data to not use a fixed list. Rather, check
  dynamically whether the requested early_memremap()/memremap() call
  needs to be mapped decrypted.
- Moved SME cpu feature into scattered features
- Moved some declarations into header files
- Cleared the encryption mask from the __PHYSICAL_MASK so that users
  of macros such as pmd_pfn_mask() don't have to worry/know about the
  encryption mask
- Updated some return types and values related to EFI and e820 functions
  so that an error could be returned
- During cpu shutdown, removed cache disabling and added a check for kexec
  in progress to use wbinvd followed immediately by halt in order to avoid
  any memory corruption
- Update how persistent memory is identified
- Added a function to find command line arguments and their values
- Added sysfs support
- General code cleanup based on feedback
- General cleanup of patch subjects and descriptions


Changes since v3:
- Broke out some of the patches into smaller individual patches
- Updated Documentation
- Added a message to indicate why the IOMMU was disabled
- Updated CPU feature support for SME by taking into account whether
  BIOS has enabled SME
- Eliminated redundant functions
- Added some warning messages for DMA usage of bounce buffers when SME
  is active
- Added support for persistent memory
- Added support to determine when setup data is being mapped and be sure
  to map it un-encrypted
- Added CONFIG support to set the default action of whether to activate
  SME if it is supported/enabled
- Added support for (re)booting with kexec

Changes since v2:
- Updated 

[Xen-devel] [PATCH v7 01/36] x86: Document AMD Secure Memory Encryption (SME)

2017-06-16 Thread Tom Lendacky
Create a Documentation entry to describe the AMD Secure Memory
Encryption (SME) feature and add documentation for the mem_encrypt=
kernel parameter.

Reviewed-by: Borislav Petkov 
Signed-off-by: Tom Lendacky 
---
 Documentation/admin-guide/kernel-parameters.txt |   11 
 Documentation/x86/amd-memory-encryption.txt |   68 +++
 2 files changed, 79 insertions(+)
 create mode 100644 Documentation/x86/amd-memory-encryption.txt

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index ee5c65a..9edc0b7 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2197,6 +2197,17 @@
memory contents and reserves bad memory
regions that are detected.
 
+   mem_encrypt=	[X86-64] AMD Secure Memory Encryption (SME) control
+   Valid arguments: on, off
+   Default (depends on kernel configuration option):
+ on  (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
+ off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
+   mem_encrypt=on: Activate SME
+   mem_encrypt=off:	Do not activate SME
+
+   Refer to Documentation/x86/amd-memory-encryption.txt
+   for details on when memory encryption can be activated.
+
mem_sleep_default=  [SUSPEND] Default system suspend mode:
s2idle  - Suspend-To-Idle
shallow - Power-On Suspend or equivalent (if supported)
diff --git a/Documentation/x86/amd-memory-encryption.txt b/Documentation/x86/amd-memory-encryption.txt
new file mode 100644
index 000..f512ab7
--- /dev/null
+++ b/Documentation/x86/amd-memory-encryption.txt
@@ -0,0 +1,68 @@
+Secure Memory Encryption (SME) is a feature found on AMD processors.
+
+SME provides the ability to mark individual pages of memory as encrypted using
+the standard x86 page tables.  A page that is marked encrypted will be
+automatically decrypted when read from DRAM and encrypted when written to
+DRAM.  SME can therefore be used to protect the contents of DRAM from physical
+attacks on the system.
+
+A page is encrypted when a page table entry has the encryption bit set (see
+below on how to determine its position).  The encryption bit can also be
+specified in the cr3 register, allowing the PGD table to be encrypted. Each
+successive level of page tables can also be encrypted by setting the encryption
+bit in the page table entry that points to the next table. This allows the full
+page table hierarchy to be encrypted. Note, this means that just because the
+encryption bit is set in cr3, doesn't imply the full hierarchy is encrypted.
+Each page table entry in the hierarchy needs to have the encryption bit set to
+achieve that. So, theoretically, you could have the encryption bit set in cr3
+so that the PGD is encrypted, but not set the encryption bit in the PGD entry
+for a PUD, which results in the PUD pointed to by that entry not being
+encrypted.
+
+Support for SME can be determined through the CPUID instruction. The CPUID
+function 0x8000001f reports information related to SME:
+
+   0x8000001f[eax]:
+   Bit[0] indicates support for SME
+   0x8000001f[ebx]:
+   Bits[5:0]  pagetable bit number used to activate memory
+  encryption
+   Bits[11:6] reduction in physical address space, in bits, when
+  memory encryption is enabled (this only affects
+  system physical addresses, not guest physical
+  addresses)
+
+If support for SME is present, MSR 0xc0010010 (MSR_K8_SYSCFG) can be used to
+determine if SME is enabled and/or to enable memory encryption:
+
+   0xc0010010:
+   Bit[23]   0 = memory encryption features are disabled
+ 1 = memory encryption features are enabled
+
+Linux relies on BIOS to set this bit if BIOS has determined that the reduction
+in the physical address space as a result of enabling memory encryption (see
+CPUID information above) will not conflict with the address space resource
+requirements for the system.  If this bit is not set upon Linux startup then
+Linux itself will not set it and memory encryption will not be possible.
+
+The state of SME in the Linux kernel can be documented as follows:
+   - Supported:
+ The CPU supports SME (determined through CPUID instruction).
+
+   - Enabled:
+ Supported and bit 23 of MSR_K8_SYSCFG is set.
+
+   - Active:
+ Supported, Enabled and the Linux kernel is actively applying
+ the encryption bit to page table entries (the SME mask in the
+ kernel is non-zero).
+
+SME can also be enabled and 

Re: [Xen-devel] [RFC PATCH v3 05/10] arm/p2m: Make PTE helpers publicly available

2017-06-16 Thread Sergej Proskurin
Hi Julien,

On 06/16/2017 08:23 PM, Julien Grall wrote:
> Hi Sergej,
> 
> On 06/16/2017 04:44 PM, Sergej Proskurin wrote:
>> Thanks. I have moved the upper helpers to page.h for now and renamed
>> them to lpae_* helpers as part of my most recent patch version. The
>> submission will follow soon.
> 
> They should go in the new file lpae.h (you were CCed on the patch series
> I sent yesterday).
> 

Yes, I saw your patch. However, I wasn't sure if I should use your code
without it being acked. So, moving the definitions to lpae.h is not a
problem at all. I will gladly rebase my code on top of your branch (or
simply use your code if you should send it to me).

> It would also be easier to review if you have 2 patches:
> #1 renaming p2m_* to lpae_* + using walk
> #2 move the helpers in lpae.h
> 

Yes, this is almost exactly how I did it: I have (i) moved the
code and (ii) renamed the functions and the affected code regions (that
is, their definitions plus the places in the code where the functions
were actually called).

> I already have them in my private branch as I need them in other places.
> So I don't mind sending them if it helps you.
> 

Sure, that would be great. Thank you.

Cheers,
~Sergej



Re: [Xen-devel] [RFC PATCH v3 05/10] arm/p2m: Make PTE helpers publicly available

2017-06-16 Thread Julien Grall

Hi Sergej,

On 06/16/2017 04:44 PM, Sergej Proskurin wrote:

Thanks. I have moved the upper helpers to page.h for now and renamed
them to lpae_* helpers as part of my most recent patch version. The
submission will follow soon.


They should go in the new file lpae.h (you were CCed on the patch series 
I sent yesterday).


It would also be easier to review if you have 2 patches:
#1 renaming p2m_* to lpae_* + using walk
#2 move the helpers in lpae.h

I already have them in my private branch as I need them in other places. 
So I don't mind sending them if it helps you.


Cheers,

--
Julien Grall



Re: [Xen-devel] [PATCH 03/24] xen/arm: setup: Remove bogus xenheap_mfn_end in setup_mm for arm64

2017-06-16 Thread Julien Grall

Hi Stefano,

On 06/16/2017 06:33 PM, Stefano Stabellini wrote:

On Fri, 16 Jun 2017, Julien Grall wrote:

Hi Stefano,

On 15/06/2017 23:28, Stefano Stabellini wrote:

On Tue, 13 Jun 2017, Julien Grall wrote:

xenheap_mfn_end is storing an MFN and not a physical address. Thankfully
xenheap_mfn_end is not used in the arm64 code. So drop it.


That's fine, but in that case I would prefer to move the definition of
xenheap_mfn_end under #ifdef CONFIG_ARM_32. In fact, there is another
assignment of xenheap_mfn_end a few lines below in the arm64 version of
setup_mm: don't we need to remove that too?


The other xenheap_mfn_end contains a valid MFN that points to the end, and I
didn't want to #ifdef it because:
1) It complicates the code
2) All regions should be bounded with start/end to simplify potential
use.


I am only suggesting to move its definition and declaration under #ifdef
CONFIG_ARM_32 in xen/include/asm-arm/mm.h and xen/arch/arm/mm.c.

After that, all users of xenheap_mfn_end are already #ifdef
CONFIG_ARM_32, except for xen/arch/arm/setup.c:setup_mm. The setup_mm
under #ifdef CONFIG_ARM_32 will be fine. The setup_mm under
#ifdef CONFIG_ARM_64, doesn't need xenheap_mfn_end and we could just
remove it from there.

Does it make sense? Am I missing something?


To be honest, I really want to limit the ifdefery in the mm code. This 
is a bit complex to follow. One of my side project is to look at that.


Also, even if xenheap_mfn_end today is not used, I think the current 
value is valid and could be helpful to have in hand. For instance, it 
does not seem justified to have different implementations of at least 
is_xen_heap_page for arm32 and arm64.


So I am not in favor of dropping xenheap_mfn_end at the moment.

Cheers,

--
Julien Grall



[Xen-devel] [OSSTEST PATCH v11 11/20] ts-openstack-deploy: Increase fd and memory limits for rabbitmq

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-openstack-deploy | 14 ++
 1 file changed, 14 insertions(+)

diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index 2107760..04317a0 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -130,6 +130,20 @@ END
 osstest ALL=(ALL) NOPASSWD:ALL
 Defaults:osstest env_keep += "CURL_CA_BUNDLE"
 END
+
+# Increase some limits of rabbit server (message broker)
+target_cmd_root($ho, <

[Xen-devel] [OSSTEST PATCH v11 14/20] ts-openstack-deploy: Ignore libvirt-python version and use latest

2017-06-16 Thread Anthony PERARD
Devstack is going to try to install a specific version of libvirt-python
(currently 2.5.0) but this fails with the libvirt installed by osstest.
Remove the requirement and use the latest available version instead.

Signed-off-by: Anthony PERARD 
---
 ts-openstack-deploy | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index 04053de..8f6c7a2 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -93,6 +93,21 @@ END
 }
 );
 
+target_editfile($ho,
+"$builddir/requirements/upper-constraints.txt",
+sub {
+while (<EI>) {
+# Ignore libvirt-python requirement and install latest,
+# otherwise it's not going to work with latest libvirt
+# installed by osstest.
+if (m/^libvirt-python===.*$/) {
+next;
+}
+print EO or die $!;
+}
+}
+);
+
 # Package python-systemd does not exist in Debian installed by osstest
 target_editfile($ho, "$builddir/devstack/files/debs/general", sub {
 while (<EI>) {
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 10/20] ts-openstack-deploy: Switch to Neutron for network

2017-06-16 Thread Anthony PERARD
nova-network is not supported anymore and Neutron is the default.

Signed-off-by: Anthony PERARD 
---
 ap-common   | 3 ++-
 make-flight | 2 +-
 ts-openstack-deploy | 8 +---
 3 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/ap-common b/ap-common
index 7e84f15..adfdc11 100644
--- a/ap-common
+++ b/ap-common
@@ -55,7 +55,8 @@
 : ${BASE_TREE_OVMF:=git://xenbits.xen.org/osstest/ovmf.git}
 
 define_openstack_trees() {
-local openstack_trees=(cinder glance keystone nova requirements tempest)
+local openstack_trees=(cinder glance keystone neutron nova requirements
+tempest)
 local tree
 local url
 
diff --git a/make-flight b/make-flight
index 3235cca..ff4f17e 100755
--- a/make-flight
+++ b/make-flight
@@ -696,7 +696,7 @@ do_examine_one () {
 
 do_openstack_tests () {
 local xsms=$(xenbranch_xsm_variants)
-local openstack_trees=(cinder devstack glance keystone nova
+local openstack_trees=(cinder devstack glance keystone neutron nova
 requirements tempest)
 
 if [ $branch != openstack-nova ]; then
diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index 1349009..2107760 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -35,6 +35,7 @@ sub checkout () {
 build_clone($ho, 'openstack_devstack', $builddir, 'devstack');
 build_clone($ho, 'openstack_glance',   $builddir, 'glance');
 build_clone($ho, 'openstack_keystone', $builddir, 'keystone');
+build_clone($ho, 'openstack_neutron',  $builddir, 'neutron');
 build_clone($ho, 'openstack_nova', $builddir, 'nova');
 build_clone($ho, 'openstack_requirements', $builddir, 'requirements');
 build_clone($ho, 'openstack_tempest',  $builddir, 'tempest');
@@ -63,13 +64,6 @@ LIBVIRT_TYPE=xen
 disable_service horizon
 disable_service n-novnc
 disable_service dstat
-# Disable neutron and switch back to nova-network
-disable_service q-svc
-disable_service q-dhcp
-disable_service q-meta
-disable_service q-agt
-disable_service q-l3
-enable_service n-net
 USE_SYSTEMD=False
 # To keep systemd off, we need to enable use of screen
 USE_SCREEN=True
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 12/20] make-flight: Increase dom0_mem for openstack flight

2017-06-16 Thread Anthony PERARD
With 4G for dom0_mem, a host running devstack is using about 1.5G of
swap.

Signed-off-by: Anthony PERARD 
---
 make-flight | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/make-flight b/make-flight
index ff4f17e..a0c9d2b 100755
--- a/make-flight
+++ b/make-flight
@@ -714,7 +714,7 @@ do_openstack_tests () {
 job_create_test test-$xenarch$kern-$dom0arch-devstack \
 test-devstack libvirt $xenarch $dom0arch \
 $os_runvars \
-dom0_mem=4000 \
+dom0_mem=6000 \
 enable_xsm=$xsm \
 all_hostflags=$most_hostflags
 done
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 19/20] ts-openstack-deploy: Increase devstack timeout

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-openstack-deploy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index cffbb5d..c21689d 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -177,7 +177,7 @@ sub deploy() {
 $httpproxy .=
 "\nCURL_CA_BUNDLE=$mitmcert; export CURL_CA_BUNDLE"
 if $mitmcert;
-target_cmd($ho, <

[Xen-devel] [OSSTEST PATCH v11 20/20] Introduce flight for stable branches of OpenStack

2017-06-16 Thread Anthony PERARD
OpenStack has many different repos which should be kept in sync, so this
patch grabs the revisions of the stable branch of every OpenStack
tree. Tempest does not have stable branches and should be able to test any
OpenStack version.

This patch also creates flights for the latest release of OpenStack, named
Ocata, instead of testing the master branch, as the scripts are not yet ready
to test it.

Signed-off-by: Anthony PERARD 
---
 ap-fetch-version | 18 +-
 ap-fetch-version-old |  8 +
 ap-print-url |  2 +-
 ap-push  |  6 
 cr-daily-branch  | 36 +---
 cr-for-branches  |  2 +-
 cri-common   |  2 +-
 make-flight  | 11 +++---
 8 files changed, 72 insertions(+), 13 deletions(-)

diff --git a/ap-fetch-version b/ap-fetch-version
index a714ee2..54265f4 100755
--- a/ap-fetch-version
+++ b/ap-fetch-version
@@ -106,10 +106,26 @@ ovmf)
repo_tree_rev_fetch_git ovmf \
$TREE_OVMF_UPSTREAM master $LOCALREV_OVMF
;;
-openstack-nova)
+openstack-tempest*)
+# OpenStack Tempest does not have stable branches and should work with any
+# version of OpenStack
repo_tree_rev_fetch_git openstack-nova \
$TREE_OPENSTACK_NOVA master $LOCALREV_OPENSTACK_NOVA
;;
+openstack-*-*)
+   os_tree="${branch#openstack-}"
+   os_tree="${os_tree%-*}"
+   branchcore="${branch##*-}"
+   eval repo_tree_rev_fetch_git "openstack-$os_tree" \
+   "\$TREE_OPENSTACK_${os_tree^^}" "stable/$branchcore" \
+"\$LOCALREV_OPENSTACK_${os_tree^^}"
+   ;;
+openstack-*)
+   os_tree="${branch#openstack-}"
+   eval repo_tree_rev_fetch_git "openstack-$os_tree" \
+   "\$TREE_OPENSTACK_${os_tree^^}" "master" \
+"\$LOCALREV_OPENSTACK_${os_tree^^}"
+   ;;
 osstest)
 if [ "x$OSSTEST_USE_HEAD" = "xy" ] ; then
git update-ref -m "Arranging to test HEAD" \
diff --git a/ap-fetch-version-old b/ap-fetch-version-old
index 6dddbb7..00af8fa 100755
--- a/ap-fetch-version-old
+++ b/ap-fetch-version-old
@@ -115,6 +115,14 @@ ovmf)
repo_tree_rev_fetch_git ovmf \
$BASE_TREE_OVMF xen-tested-master $BASE_LOCALREV_OVMF
;;
+openstack-nova-*)
+   os_tree="${branch#openstack-}"
+   os_tree="${os_tree%-*}"
+   branchcore="${branch##*-}"
+   eval repo_tree_rev_fetch_git "openstack-$os_tree" \
+   "\$BASE_TREE_OPENSTACK_${os_tree^^}" "xen-tested-$branchcore" \
+"\$BASE_LOCALREV_OPENSTACK_${os_tree^^}"
+   ;;
 openstack-nova)
repo_tree_rev_fetch_git openstack-nova \
	$BASE_TREE_OPENSTACK_NOVA xen-tested-master $BASE_LOCALREV_OPENSTACK_NOVA
diff --git a/ap-print-url b/ap-print-url
index 6f4e6b1..6d42ddf 100755
--- a/ap-print-url
+++ b/ap-print-url
@@ -67,7 +67,7 @@ ovmf)
 osstest)
echo none:;
;;
-openstack-nova)
+openstack-nova*)
echo $TREE_OPENSTACK_NOVA
;;
 *)
diff --git a/ap-push b/ap-push
index 136d1b6..0cac8a6 100755
--- a/ap-push
+++ b/ap-push
@@ -134,6 +134,12 @@ openstack-nova)
cd $repos/openstack-nova
git push $TREE_OPENSTACK_NOVA $revision:refs/heads/xen-tested-master
;;
+openstack-nova-*)
+cd $repos/openstack-nova
+openstack_nova_branch=${branch#openstack-nova-}
+git push $TREE_OPENSTACK_NOVA \
+$revision:refs/heads/xen-tested-stable-$openstack_nova_branch
+;;
 osstest)
git push $HOME/testing.git $revision:production
if [ x"$TREEBRANCH_OSSTEST_UPSTREAM" != x ] ; then
diff --git a/cr-daily-branch b/cr-daily-branch
index 5b24c47..20c95fc 100755
--- a/cr-daily-branch
+++ b/cr-daily-branch
@@ -207,10 +207,38 @@ if [ "x$REVISION_LINUXFIRMWARE" = x ]; then
determine_version REVISION_LINUXFIRMWARE linuxfirmware LINUXFIRMWARE
 export REVISION_LINUXFIRMWARE
 fi
-if [ "x$REVISION_OPENSTACK_NOVA" = x ]; then
-determine_version REVISION_OPENSTACK_NOVA openstack-nova OPENSTACK_NOVA
-export REVISION_OPENSTACK_NOVA
-fi
+openstack_rev() {
+local os_tree="$1"
+local os_branch
+
+if eval [ "x\$REVISION_OPENSTACK_${os_tree^^}" = x ]; then
+case "$branch" in
+openstack-*-*)
+os_branch="openstack-$os_tree-${branch##*-}"
+os_git_branch="origin/stable/${branch##*-}"
+;;
+*)
+os_branch="openstack-$os_tree"
+os_git_branch="origin/master"
+;;
+esac
+
+# Use latest version, even for other openstack trees
+# so branch openstack-nova-ocata should have other trees like
+# openstack-neutron have the revision of the same branch fetch
+# at the same time
+if [ "$branch" != "$os_branch" 

[Xen-devel] [OSSTEST PATCH v11 18/20] ts-logs-capture: Capture OpenStack logs

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-logs-capture | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/ts-logs-capture b/ts-logs-capture
index 061a118..0e3d267 100755
--- a/ts-logs-capture
+++ b/ts-logs-capture
@@ -171,6 +171,12 @@ sub fetch_logs_host () {
 
   /var/core/*.core
 
+  /var/log/openstack/*.log
+  /etc/nova/*
+  /etc/neutron/*
+  /etc/cinder/*
+  /home/osstest/build.*.test-*-devstack/tempest/etc/tempest.conf
+
   )];
 if (!try_fetch_logs($ho, $logs)) {
 logm("log fetching failed, trying hard host reboot...");
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 15/20] ts-openstack-tempest: Fix tempest invocation

2017-06-16 Thread Anthony PERARD
./run_tempest.sh is deprecated.

Signed-off-by: Anthony PERARD 
---
 ts-openstack-tempest | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ts-openstack-tempest b/ts-openstack-tempest
index 82e9a71..b95043a 100755
--- a/ts-openstack-tempest
+++ b/ts-openstack-tempest
@@ -58,7 +58,8 @@ sub tempest() {
 
 target_cmd($ho, <

[Xen-devel] [OSSTEST PATCH v11 17/20] ts-openstack-deploy: Move logs to /var/log/openstack

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-openstack-deploy | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index 8f6c7a2..cffbb5d 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -58,7 +58,7 @@ DEST=$builddir
 DATA_DIR=\$DEST/data
 SERVICE_DIR=\$DEST/status
 SUBUNIT_OUTPUT=\$DEST/devstack.subunit
-LOGFILE=\$DEST/logs/stack.sh.log
+LOGDIR=/var/log/openstack
 LOG_COLOR=False
 LIBVIRT_TYPE=xen
 disable_service horizon
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 16/20] ts-openstack-tempest: Update list of skipped tests

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-openstack-tempest | 19 ---
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/ts-openstack-tempest b/ts-openstack-tempest
index b95043a..ae3662f 100755
--- a/ts-openstack-tempest
+++ b/ts-openstack-tempest
@@ -31,23 +31,20 @@ sub tempest() {
 my $scenario = 'tempest.scenario';
 my $volume_boot_pattern =
 "$scenario.test_volume_boot_pattern.TestVolumeBootPattern";
-my $shelve_instance = "$scenario.test_shelve_instance.TestShelveInstance";
-
-# Ignore tests which try to boot a guest with /dev/vda as boot device name.
-push @ignored_tests,
-"^\Q$volume_boot_pattern.test_volume_boot_pattern\E";
-push @ignored_tests,
-"^\Q$volume_boot_pattern.test_create_ebs_image_and_check_boot\E";
-push @ignored_tests,
-"^\Q$shelve_instance.test_shelve_volume_backed_instance\E";
 
 # Those tests access a volume through iSCSI. This does not work when both
 # the server and client of iSCSI are on the same Xen host (both in dom0),
 # Linux 4.0 is the first Linux to have a fix.
 push @ignored_tests,
-"^\Q${volume_boot_pattern}V2.test_volume_boot_pattern\E";
+"^\Q${volume_boot_pattern}.test_volume_boot_pattern\E";
+push @ignored_tests,
+"^\Q${volume_boot_pattern}.test_create_ebs_image_and_check_boot\E";
+
+# See nova.git:devstack/tempest-dsvm-tempest-xen-rc
+push @ignored_tests,
+"^\Qtempest.api.compute.admin.test_volume_swap.TestVolumeSwap.test_volume_swap\E";
 push @ignored_tests,
-"^\Q${volume_boot_pattern}V2.test_create_ebs_image_and_check_boot\E";
+"^\Qtempest.api.compute.images.test_images.ImagesTestJSON.test_create_image_from_paused_server\E";
 
 # This regex below select the tests to run and exclude the ones marked as
 # slow as well as the explicit tests listed above.  It is based on the one
-- 
Anthony PERARD




[Xen-devel] [OSSTEST PATCH v11 13/20] ts-openstack-deploy: Apply a Tempest patch

2017-06-16 Thread Anthony PERARD
Signed-off-by: Anthony PERARD 
---
 ts-openstack-deploy | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/ts-openstack-deploy b/ts-openstack-deploy
index 04317a0..04053de 100755
--- a/ts-openstack-deploy
+++ b/ts-openstack-deploy
@@ -144,6 +144,16 @@ END
 <

[Xen-devel] [PATCH 42/44] powerpc/cell: use the dma_supported method for ops switching

2017-06-16 Thread Christoph Hellwig
Besides removing the last instance of the set_dma_mask method, this also
reduces code duplication.
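
This relies on the generic path that remains once the set_dma_mask method is removed (see patch 43/44): dma_set_mask() only consults dma_supported(), so the ops switch can happen there. Roughly:

int dma_set_mask(struct device *dev, u64 mask)
{
        /* dma_supported() ends up in dma_supported_and_switch(), which
         * installs the appropriate dma_map_ops as a side effect. */
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}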

Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/platforms/cell/iommu.c | 25 +
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 497bfbdbd967..29d4f96ed33e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -644,20 +644,14 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
   direction, attrs);
 }
 
-static int dma_fixed_dma_supported(struct device *dev, u64 mask)
-{
-   return mask == DMA_BIT_MASK(64);
-}
-
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+static int dma_supported_and_switch(struct device *dev, u64 dma_mask);
 
 static const struct dma_map_ops dma_iommu_fixed_ops = {
.alloc  = dma_fixed_alloc_coherent,
.free   = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
.unmap_sg   = dma_fixed_unmap_sg,
-   .dma_supported  = dma_fixed_dma_supported,
-   .set_dma_mask   = dma_set_mask_and_switch,
+   .dma_supported  = dma_supported_and_switch,
.map_page   = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page,
.mapping_error  = dma_iommu_mapping_error,
@@ -952,11 +946,8 @@ static u64 cell_iommu_get_fixed_address(struct device *dev)
return dev_addr;
 }
 
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+static int dma_supported_and_switch(struct device *dev, u64 dma_mask)
 {
-   if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-   return -EIO;
-
if (dma_mask == DMA_BIT_MASK(64) &&
cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
u64 addr = cell_iommu_get_fixed_address(dev) +
@@ -965,14 +956,16 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
	set_dma_ops(dev, &dma_iommu_fixed_ops);
set_dma_offset(dev, addr);
-   } else {
+   return 1;
+   }
+
+   if (dma_iommu_dma_supported(dev, dma_mask)) {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
set_dma_ops(dev, get_pci_dma_ops());
cell_dma_dev_setup(dev);
+   return 1;
}
 
-   *dev->dma_mask = dma_mask;
-
return 0;
 }
 
@@ -1127,7 +1120,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
}
 
-   dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+   dma_iommu_ops.dma_supported = dma_suported_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);
 
return 0;
-- 
2.11.0




[Xen-devel] [PATCH 39/44] xen-swiotlb: remove xen_swiotlb_set_dma_mask

2017-06-16 Thread Christoph Hellwig
This just duplicates the generic implementation.

Signed-off-by: Christoph Hellwig 
---
 drivers/xen/swiotlb-xen.c | 12 
 1 file changed, 12 deletions(-)

diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index c3a04b2d7532..82fc54f8eb77 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -661,17 +661,6 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
 }
 
-static int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
-{
-   if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
-   return -EIO;
-
-   *dev->dma_mask = dma_mask;
-
-   return 0;
-}
-
 /*
  * Create userspace mapping for the DMA-coherent memory.
  * This function should be called with the pages from the current domain only,
@@ -734,7 +723,6 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
-   .set_dma_mask = xen_swiotlb_set_dma_mask,
.mmap = xen_swiotlb_dma_mmap,
.get_sgtable = xen_swiotlb_get_sgtable,
.mapping_error  = xen_swiotlb_mapping_error,
-- 
2.11.0




[Xen-devel] [PATCH 40/44] tile: remove dma_supported and mapping_error methods

2017-06-16 Thread Christoph Hellwig
These just duplicate the default behavior if no method is provided.
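For context, the default behavior being relied on is the generic fallback in <linux/dma-mapping.h>, roughly (as of this series):

static inline int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;       /* no method: all masks are supported */
        return ops->dma_supported(dev, mask);
}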

Signed-off-by: Christoph Hellwig 
---
 arch/tile/kernel/pci-dma.c | 30 --
 1 file changed, 30 deletions(-)

diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 569bb6dd154a..f2abedc8a080 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -317,18 +317,6 @@ static void tile_dma_sync_sg_for_device(struct device *dev,
}
 }
 
-static inline int
-tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-   return 0;
-}
-
-static inline int
-tile_dma_supported(struct device *dev, u64 mask)
-{
-   return 1;
-}
-
 static const struct dma_map_ops tile_default_dma_map_ops = {
.alloc = tile_dma_alloc_coherent,
.free = tile_dma_free_coherent,
@@ -340,8 +328,6 @@ static const struct dma_map_ops tile_default_dma_map_ops = {
.sync_single_for_device = tile_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_dma_sync_sg_for_device,
-   .mapping_error = tile_dma_mapping_error,
-   .dma_supported = tile_dma_supported
 };
 
 const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
@@ -504,18 +490,6 @@ static void tile_pci_dma_sync_sg_for_device(struct device *dev,
}
 }
 
-static inline int
-tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-   return 0;
-}
-
-static inline int
-tile_pci_dma_supported(struct device *dev, u64 mask)
-{
-   return 1;
-}
-
 static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.alloc = tile_pci_dma_alloc_coherent,
.free = tile_pci_dma_free_coherent,
@@ -527,8 +501,6 @@ static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-   .mapping_error = tile_pci_dma_mapping_error,
-   .dma_supported = tile_pci_dma_supported
 };
 
 const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
@@ -578,8 +550,6 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
-   .mapping_error = tile_pci_dma_mapping_error,
-   .dma_supported = tile_pci_dma_supported
 };
 
 const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &tile_swiotlb_dma_ops;
-- 
2.11.0




[Xen-devel] [PATCH 43/44] dma-mapping: remove the set_dma_mask method

2017-06-16 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/kernel/dma.c   | 4 
 include/linux/dma-mapping.h | 6 --
 2 files changed, 10 deletions(-)

diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 41c749586bd2..466c9f07b288 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -316,10 +316,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
 
 int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
-   const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-   if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
-   return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 3e5908656226..527f2ed8c645 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -127,7 +127,6 @@ struct dma_map_ops {
   enum dma_data_direction dir);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
-   int (*set_dma_mask)(struct device *dev, u64 mask);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
 #endif
@@ -563,11 +562,6 @@ static inline int dma_supported(struct device *dev, u64 mask)
 #ifndef HAVE_ARCH_DMA_SET_MASK
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
-   const struct dma_map_ops *ops = get_dma_ops(dev);
-
-   if (ops->set_dma_mask)
-   return ops->set_dma_mask(dev, mask);
-
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
-- 
2.11.0




[Xen-devel] [PATCH 44/44] powerpc: merge __dma_set_mask into dma_set_mask

2017-06-16 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/include/asm/dma-mapping.h |  1 -
 arch/powerpc/kernel/dma.c  | 13 -
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 73aedbe6c977..eaece3d3e225 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -112,7 +112,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 #define HAVE_ARCH_DMA_SET_MASK 1
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
-extern int __dma_set_mask(struct device *dev, u64 dma_mask);
 extern u64 __dma_get_required_mask(struct device *dev);
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 466c9f07b288..4194db10 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -314,14 +314,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
-int __dma_set_mask(struct device *dev, u64 dma_mask)
-{
-   if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-   return -EIO;
-   *dev->dma_mask = dma_mask;
-   return 0;
-}
-
 int dma_set_mask(struct device *dev, u64 dma_mask)
 {
if (ppc_md.dma_set_mask)
@@ -334,7 +326,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
return phb->controller_ops.dma_set_mask(pdev, dma_mask);
}
 
-   return __dma_set_mask(dev, dma_mask);
+   if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+   return -EIO;
+   *dev->dma_mask = dma_mask;
+   return 0;
 }
 EXPORT_SYMBOL(dma_set_mask);
 
-- 
2.11.0




[Xen-devel] [PATCH 38/44] arm: implement ->dma_supported instead of ->set_dma_mask

2017-06-16 Thread Christoph Hellwig
Same behavior, less code duplication.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/common/dmabounce.c | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 6ecd5be5d37e..9a92de63426f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -445,12 +445,12 @@ static void dmabounce_sync_for_device(struct device *dev,
arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
 }
 
-static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
 {
if (dev->archdata.dmabounce)
return 0;
 
-   return arm_dma_ops.set_dma_mask(dev, dma_mask);
+   return arm_dma_ops.dma_supported(dev, dma_mask);
 }
 
 static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -471,9 +471,8 @@ static const struct dma_map_ops dmabounce_ops = {
.unmap_sg   = arm_dma_unmap_sg,
.sync_sg_for_cpu= arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
-   .set_dma_mask   = dmabounce_set_mask,
+   .dma_supported  = dmabounce_dma_supported,
.mapping_error  = dmabounce_mapping_error,
-   .dma_supported  = arm_dma_supported,
 };
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
-- 
2.11.0




[Xen-devel] [PATCH 41/44] powerpc/cell: clean up fixed mapping dma_ops initialization

2017-06-16 Thread Christoph Hellwig
By the time cell_pci_dma_dev_setup calls cell_dma_dev_setup no device can
have the fixed map_ops set yet as it's only set by the set_dma_mask
method.  So move the setup for the fixed case to be only called in that
place instead of indirecting through cell_dma_dev_setup.
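
The ordering argument, sketched (illustrative only):

/*
 * Device discovery:
 *   cell_pci_dma_dev_setup()
 *     -> cell_dma_dev_setup()        - fixed ops cannot be installed yet
 * Driver initialization:
 *   dma_set_mask()
 *     -> dma_set_mask_and_switch()   - the only place the fixed ops are
 *        (sets up the fixed mapping)   installed, so do the setup there
 */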

Signed-off-by: Christoph Hellwig 
---
 arch/powerpc/platforms/cell/iommu.c | 27 +++
 1 file changed, 7 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 948086e33a0c..497bfbdbd967 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -663,14 +663,9 @@ static const struct dma_map_ops dma_iommu_fixed_ops = {
.mapping_error  = dma_iommu_mapping_error,
 };
 
-static void cell_dma_dev_setup_fixed(struct device *dev);
-
 static void cell_dma_dev_setup(struct device *dev)
 {
-   /* Order is important here, these are not mutually exclusive */
-   if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-   cell_dma_dev_setup_fixed(dev);
-   else if (get_pci_dma_ops() == &dma_iommu_ops)
+   if (get_pci_dma_ops() == &dma_iommu_ops)
	set_iommu_table_base(dev, cell_get_iommu_table(dev));
	else if (get_pci_dma_ops() == &dma_direct_ops)
set_dma_offset(dev, cell_dma_direct_offset);
@@ -963,32 +958,24 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
return -EIO;
 
if (dma_mask == DMA_BIT_MASK(64) &&
-   cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
-   {
+   cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
+   u64 addr = cell_iommu_get_fixed_address(dev) +
+   dma_iommu_fixed_base;
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+   dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
	set_dma_ops(dev, &dma_iommu_fixed_ops);
+   set_dma_offset(dev, addr);
} else {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
set_dma_ops(dev, get_pci_dma_ops());
+   cell_dma_dev_setup(dev);
}
 
-   cell_dma_dev_setup(dev);
-
*dev->dma_mask = dma_mask;
 
return 0;
 }
 
-static void cell_dma_dev_setup_fixed(struct device *dev)
-{
-   u64 addr;
-
-   addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
-   set_dma_offset(dev, addr);
-
-   dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
-}
-
 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
   unsigned long base_pte)
 {
-- 
2.11.0




[Xen-devel] [PATCH 33/44] openrisc: remove arch-specific dma_supported implementation

2017-06-16 Thread Christoph Hellwig
This implementation is simply bogus - openrisc only has a simple
direct mapped DMA implementation and thus doesn't care about the
address.

Signed-off-by: Christoph Hellwig 
---
 arch/openrisc/include/asm/dma-mapping.h | 7 ---
 1 file changed, 7 deletions(-)

diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index a4ea139c2ef9..f41bd3cb76d9 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -33,11 +33,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	return &or1k_dma_map_ops;
 }
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
-   /* Support 32 bit DMA mask exclusively */
-   return dma_mask == DMA_BIT_MASK(32);
-}
-
 #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
-- 
2.11.0




[Xen-devel] [PATCH 30/44] dma-virt: remove dma_supported and mapping_error methods

2017-06-16 Thread Christoph Hellwig
These just duplicate the default behavior if no method is provided.

Signed-off-by: Christoph Hellwig 
---
 lib/dma-virt.c | 12 
 1 file changed, 12 deletions(-)

diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
 }
 
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-   return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
-   return true;
-}
-
 const struct dma_map_ops dma_virt_ops = {
.alloc  = dma_virt_alloc,
.free   = dma_virt_free,
.map_page   = dma_virt_map_page,
.map_sg = dma_virt_map_sg,
-   .mapping_error  = dma_virt_mapping_error,
-   .dma_supported  = dma_virt_supported,
 };
 EXPORT_SYMBOL(dma_virt_ops);
-- 
2.11.0




[Xen-devel] [PATCH 34/44] arm: remove arch specific dma_supported implementation

2017-06-16 Thread Christoph Hellwig
And instead wire it up as a method for all the dma_map_ops instances.

Note that the code seems a little fishy for dmabounce and iommu, but
for now I'd like to preserve the existing behavior 1:1.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/common/dmabounce.c| 1 +
 arch/arm/include/asm/dma-iommu.h   | 2 ++
 arch/arm/include/asm/dma-mapping.h | 3 ---
 arch/arm/mm/dma-mapping.c  | 7 +--
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 4060378e0f14..6ecd5be5d37e 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -473,6 +473,7 @@ static const struct dma_map_ops dmabounce_ops = {
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.set_dma_mask   = dmabounce_set_mask,
.mapping_error  = dmabounce_mapping_error,
+   .dma_supported  = arm_dma_supported,
 };
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 389a26a10ea3..c090ec675eac 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -35,5 +35,7 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
+int arm_dma_supported(struct device *dev, u64 mask);
+
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 52a8fd5a8edb..8dabcfdf4505 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -20,9 +20,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
	return &arm_dma_ops;
 }
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8f2c5a8a98f0..b9677ada421f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -199,6 +199,7 @@ const struct dma_map_ops arm_dma_ops = {
.sync_sg_for_cpu= arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.mapping_error  = arm_dma_mapping_error,
+   .dma_supported  = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -218,6 +219,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.map_page   = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
.mapping_error  = arm_dma_mapping_error,
+   .dma_supported  = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
@@ -1184,11 +1186,10 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 * during bus mastering, then you would pass 0x00ffffff as the mask
  * to this function.
  */
-int dma_supported(struct device *dev, u64 mask)
+int arm_dma_supported(struct device *dev, u64 mask)
 {
return __dma_supported(dev, mask, false);
 }
-EXPORT_SYMBOL(dma_supported);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
 
@@ -2149,6 +2150,7 @@ const struct dma_map_ops iommu_ops = {
.unmap_resource = arm_iommu_unmap_resource,
 
.mapping_error  = arm_dma_mapping_error,
+   .dma_supported  = arm_dma_supported,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
@@ -2167,6 +2169,7 @@ const struct dma_map_ops iommu_coherent_ops = {
.unmap_resource = arm_iommu_unmap_resource,
 
.mapping_error  = arm_dma_mapping_error,
+   .dma_supported  = arm_dma_supported,
 };
 
 /**
-- 
2.11.0




[Xen-devel] [PATCH 35/44] x86: remove arch specific dma_supported implementation

2017-06-16 Thread Christoph Hellwig
And instead wire it up as a method for all the dma_map_ops instances.

Note that this also means the arch-specific check is now applied in
full, instead of only partially, in the AMD iommu driver.

Signed-off-by: Christoph Hellwig 
---
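To illustrate the effect (a sketch with a hypothetical driver; forbid_dac
is the existing command-line controlled knob checked below):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* sketch: with forbid_dac set, every x86 instance now refuses a >32-bit
 * mask through its ->dma_supported method rather than through the
 * removed global dma_supported() hook */
static int example_probe(struct pci_dev *pdev)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		/* DAC addressing refused, fall back to 32-bit DMA */
		return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	return 0;
}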
 arch/x86/include/asm/dma-mapping.h | 3 ---
 arch/x86/include/asm/iommu.h   | 2 ++
 arch/x86/kernel/amd_gart_64.c  | 1 +
 arch/x86/kernel/pci-calgary_64.c   | 1 +
 arch/x86/kernel/pci-dma.c  | 7 +--
 arch/x86/kernel/pci-nommu.c| 1 +
 arch/x86/pci/sta2x11-fixup.c   | 3 ++-
 drivers/iommu/amd_iommu.c  | 2 ++
 drivers/iommu/intel-iommu.c| 3 +++
 9 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index c35d228aa381..398c79889f5c 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -33,9 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
 #define arch_dma_alloc_attrs arch_dma_alloc_attrs
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *hwdev, u64 mask);
-
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 793869879464..fca144a104e4 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,8 @@ extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
 
+int x86_dma_supported(struct device *dev, u64 mask);
+
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
 
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 815dd63f49d0..cc0e8bc0ea3f 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = {
.alloc  = gart_alloc_coherent,
.free   = gart_free_coherent,
.mapping_error  = gart_mapping_error,
+   .dma_supported  = x86_dma_supported,
 };
 
 static void gart_iommu_shutdown(void)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index e75b490f2b0b..5286a4a92cf7 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -493,6 +493,7 @@ static const struct dma_map_ops calgary_dma_ops = {
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
.mapping_error = calgary_mapping_error,
+   .dma_supported = x86_dma_supported,
 };
 
 static inline void __iomem * busno_to_bbar(unsigned char num)
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3a216ec869cd..b6f5684be3b5 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -213,10 +213,8 @@ static __init int iommu_setup(char *p)
 }
 early_param("iommu", iommu_setup);
 
-int dma_supported(struct device *dev, u64 mask)
+int x86_dma_supported(struct device *dev, u64 mask)
 {
-   const struct dma_map_ops *ops = get_dma_ops(dev);
-
 #ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask)
}
 #endif
 
-   if (ops->dma_supported)
-   return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
   only work for pci_alloc_coherent.
   The caller just has to use GFP_DMA in this case. */
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 085fe6ce4049..a6d404087fe3 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -104,4 +104,5 @@ const struct dma_map_ops nommu_dma_ops = {
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys= 1,
.mapping_error  = nommu_mapping_error,
+   .dma_supported  = x86_dma_supported,
 };
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index ec008e800b45..53d600217973 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -26,6 +26,7 @@
 #include 
 #include 
 #include 
+#include <asm/iommu.h>
 
 #define STA2X11_SWIOTLB_SIZE (4*1024*1024)
 extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -191,7 +192,7 @@ static const struct dma_map_ops sta2x11_dma_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
-   .dma_supported = NULL, /* FIXME: we should use this instead! */
+   .dma_supported = x86_dma_supported,
 };
 

[Xen-devel] [PATCH 37/44] mips/loongson64: implement ->dma_supported instead of ->set_dma_mask

2017-06-16 Thread Christoph Hellwig
Same behavior, less code duplication.

Signed-off-by: Christoph Hellwig 
---
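The removed duplication is what the generic path already provides; a
simplified sketch of dma_set_mask() from <linux/dma-mapping.h>, ignoring
the set_dma_mask override that this patch drops:

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;	/* dma_supported() consults ->dma_supported */

	*dev->dma_mask = mask;
	return 0;
}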
 arch/mips/loongson64/common/dma-swiotlb.c | 19 +--
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 178ca17a5667..34486c138206 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -75,19 +75,11 @@ static void loongson_dma_sync_sg_for_device(struct device *dev,
mb();
 }
 
-static int loongson_dma_set_mask(struct device *dev, u64 mask)
+static int loongson_dma_supported(struct device *dev, u64 mask)
 {
-   if (!dev->dma_mask || !dma_supported(dev, mask))
-   return -EIO;
-
-   if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
-   *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
-   return -EIO;
-   }
-
-   *dev->dma_mask = mask;
-
-   return 0;
+   if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
+   return 0;
+   return swiotlb_dma_supported(dev, mask);
 }
 
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -126,8 +118,7 @@ static const struct dma_map_ops loongson_dma_map_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = loongson_dma_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
-   .dma_supported = swiotlb_dma_supported,
-   .set_dma_mask = loongson_dma_set_mask
+   .dma_supported = loongson_dma_supported,
 };
 
 void __init plat_swiotlb_setup(void)
-- 
2.11.0




[Xen-devel] [PATCH 36/44] dma-mapping: remove HAVE_ARCH_DMA_SUPPORTED

2017-06-16 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 include/linux/dma-mapping.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index a57875309bfd..3e5908656226 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -549,7 +549,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
 }
 
-#ifndef HAVE_ARCH_DMA_SUPPORTED
 static inline int dma_supported(struct device *dev, u64 mask)
 {
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -560,7 +559,6 @@ static inline int dma_supported(struct device *dev, u64 mask)
return 1;
return ops->dma_supported(dev, mask);
 }
-#endif
 
 #ifndef HAVE_ARCH_DMA_SET_MASK
 static inline int dma_set_mask(struct device *dev, u64 mask)
-- 
2.11.0




[Xen-devel] [PATCH 32/44] hexagon: remove the unused dma_is_consistent prototype

2017-06-16 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
---
 arch/hexagon/include/asm/dma-mapping.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 9c15cb5271a6..463dbc18f853 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -37,7 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
 }
 
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
   enum dma_data_direction direction);
 
-- 
2.11.0




[Xen-devel] [PATCH 31/44] hexagon: remove arch-specific dma_supported implementation

2017-06-16 Thread Christoph Hellwig
This implementation is simply bogus - hexagon only has a simple
direct mapped DMA implementation and thus doesn't care about the
address.

Signed-off-by: Christoph Hellwig 
Acked-by: Richard Kuo 
---
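A sketch of why the check was bogus (hypothetical caller):

#include <linux/dma-mapping.h>

/* sketch: a perfectly reasonable request used to fail on hexagon */
static int example_setup(struct device *dev)
{
	/* old code: -EIO, since the mask is not exactly DMA_BIT_MASK(32);
	 * new code: succeeds, the absent method means "always supported" */
	return dma_set_mask(dev, DMA_BIT_MASK(64));
}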
 arch/hexagon/include/asm/dma-mapping.h | 2 --
 arch/hexagon/kernel/dma.c  | 9 -
 2 files changed, 11 deletions(-)

diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 00e3f10113b0..9c15cb5271a6 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -37,8 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
 }
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
   enum dma_data_direction direction);
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 71269dc0f225..9ff1b2041f85 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -35,15 +35,6 @@ static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
return phys_to_virt((unsigned long) dma_addr);
 }
 
-int dma_supported(struct device *dev, u64 mask)
-{
-   if (mask == DMA_BIT_MASK(32))
-   return 1;
-   else
-   return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
 static struct gen_pool *coherent_pool;
 
 
-- 
2.11.0




[Xen-devel] [PATCH 28/44] sparc: remove arch specific dma_supported implementations

2017-06-16 Thread Christoph Hellwig
Usually dma_supported decisions are made by the dma_map_ops instance.
Switch sparc to that model by providing a ->dma_supported instance for
sbus that always returns false, implementations tailored to the sun4u
and sun4v cases for sparc64, and no implementation for PCI on sparc32,
which means the mask is always supported.

Signed-off-by: Christoph Hellwig 
Acked-by: David S. Miller 
---
 arch/sparc/include/asm/dma-mapping.h |  3 ---
 arch/sparc/kernel/iommu.c| 40 +++-
 arch/sparc/kernel/ioport.c   | 22 ++--
 arch/sparc/kernel/pci_sun4v.c| 17 +++
 4 files changed, 39 insertions(+), 43 deletions(-)

diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 98da9f92c318..60bf1633d554 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -5,9 +5,6 @@
 #include 
 #include 
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-int dma_supported(struct device *dev, u64 mask);
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
  enum dma_data_direction dir)
 {
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index dafa316d978d..fcbcc031f615 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -746,6 +746,21 @@ static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == SPARC_MAPPING_ERROR;
 }
 
+static int dma_4u_supported(struct device *dev, u64 device_mask)
+{
+   struct iommu *iommu = dev->archdata.iommu;
+
+   if (device_mask > DMA_BIT_MASK(32))
+   return 0;
+   if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
+   return 1;
+#ifdef CONFIG_PCI
+   if (dev_is_pci(dev))
+   return pci64_dma_supported(to_pci_dev(dev), device_mask);
+#endif
+   return 0;
+}
+
 static const struct dma_map_ops sun4u_dma_ops = {
.alloc  = dma_4u_alloc_coherent,
.free   = dma_4u_free_coherent,
@@ -755,32 +770,9 @@ static const struct dma_map_ops sun4u_dma_ops = {
.unmap_sg   = dma_4u_unmap_sg,
.sync_single_for_cpu= dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu= dma_4u_sync_sg_for_cpu,
+   .dma_supported  = dma_4u_supported,
.mapping_error  = dma_4u_mapping_error,
 };
 
 const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
-
-int dma_supported(struct device *dev, u64 device_mask)
-{
-   struct iommu *iommu = dev->archdata.iommu;
-   u64 dma_addr_mask = iommu->dma_addr_mask;
-
-   if (device_mask > DMA_BIT_MASK(32)) {
-   if (iommu->atu)
-   dma_addr_mask = iommu->atu->dma_addr_mask;
-   else
-   return 0;
-   }
-
-   if ((device_mask & dma_addr_mask) == dma_addr_mask)
-   return 1;
-
-#ifdef CONFIG_PCI
-   if (dev_is_pci(dev))
-   return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
-
-   return 0;
-}
-EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index dd081d557609..12894f259bea 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -401,6 +401,11 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG();
 }
 
+static int sbus_dma_supported(struct device *dev, u64 mask)
+{
+   return 0;
+}
+
 static const struct dma_map_ops sbus_dma_ops = {
.alloc  = sbus_alloc_coherent,
.free   = sbus_free_coherent,
@@ -410,6 +415,7 @@ static const struct dma_map_ops sbus_dma_ops = {
.unmap_sg   = sbus_unmap_sg,
.sync_sg_for_cpu= sbus_sync_sg_for_cpu,
.sync_sg_for_device = sbus_sync_sg_for_device,
+   .dma_supported  = sbus_dma_supported,
 };
 
 static int __init sparc_register_ioport(void)
@@ -655,22 +661,6 @@ EXPORT_SYMBOL(pci32_dma_ops);
 const struct dma_map_ops *dma_ops = &sbus_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-
-/*
- * Return whether the given PCI device DMA address mask can be
- * supported properly.  For example, if your device can only drive the
- * low 24-bits during PCI bus mastering, then you would pass
- * 0x00ffffff as the mask to this function.
- */
-int dma_supported(struct device *dev, u64 mask)
-{
-   if (dev_is_pci(dev))
-   return 1;
-
-   return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
 #ifdef CONFIG_PROC_FS
 
 static int sparc_io_proc_show(struct seq_file *m, void *v)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 8e2a56f4c03a..24f21c726dfa 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -24,6 +24,7 @@
 
 #include "pci_impl.h"
 #include "iommu_common.h"
+#include 

[Xen-devel] [PATCH 27/44] sparc: remove leon_dma_ops

2017-06-16 Thread Christoph Hellwig
We can just use pci32_dma_ops directly.

Signed-off-by: Christoph Hellwig 
Acked-by: David S. Miller 
---
 arch/sparc/include/asm/dma-mapping.h | 3 +--
 arch/sparc/kernel/ioport.c   | 5 +
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index b8e8dfcd065d..98da9f92c318 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -17,7 +17,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 }
 
 extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops *leon_dma_ops;
 extern const struct dma_map_ops pci32_dma_ops;
 
 extern struct bus_type pci_bus_type;
@@ -26,7 +25,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
-   return leon_dma_ops;
+   return &pci32_dma_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
	if (bus == &pci_bus_type)
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cf20033a1458..dd081d557609 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -637,6 +637,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
}
 }
 
+/* note: leon re-uses pci32_dma_ops */
 const struct dma_map_ops pci32_dma_ops = {
.alloc  = pci32_alloc_coherent,
.free   = pci32_free_coherent,
@@ -651,10 +652,6 @@ const struct dma_map_ops pci32_dma_ops = {
 };
 EXPORT_SYMBOL(pci32_dma_ops);
 
-/* leon re-uses pci32_dma_ops */
-const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
-EXPORT_SYMBOL(leon_dma_ops);
-
 const struct dma_map_ops *dma_ops = &sbus_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-- 
2.11.0




[Xen-devel] [PATCH 29/44] dma-noop: remove dma_supported and mapping_error methods

2017-06-16 Thread Christoph Hellwig
These just duplicate the default behavior if no method is provided.

Signed-off-by: Christoph Hellwig 
---
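Callers are unaffected: with the methods gone, the generic wrappers
report success, so the usual error check still works. A sketch with a
hypothetical caller:

#include <linux/dma-mapping.h>

/* sketch: dma_mapping_error() returns 0 for dma_noop_ops, because no
 * ->mapping_error method is installed */
static int example_map(struct device *dev, struct page *page, size_t size)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))	/* always false here */
		return -ENOMEM;
	return 0;
}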
 lib/dma-noop.c | 12 
 1 file changed, 12 deletions(-)

diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..643a074f139d 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -54,23 +54,11 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
return nents;
 }
 
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-   return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
-   return 1;
-}
-
 const struct dma_map_ops dma_noop_ops = {
.alloc  = dma_noop_alloc,
.free   = dma_noop_free,
.map_page   = dma_noop_map_page,
.map_sg = dma_noop_map_sg,
-   .mapping_error  = dma_noop_mapping_error,
-   .dma_supported  = dma_noop_supported,
 };
 
 EXPORT_SYMBOL(dma_noop_ops);
-- 
2.11.0




[Xen-devel] [PATCH 23/44] x86/calgary: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig 
---
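Why 0 is a safe private sentinel here (a sketch annotating the new
method; see calgary_reserve_regions() below, which keeps the first
EMERGENCY_PAGES starting at bus address 0 out of the allocator):

#define CALGARY_MAPPING_ERROR	0

/* sketch: a valid mapping can never be 0, because the table reserves
 * EMERGENCY_PAGES starting at that address */
static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == CALGARY_MAPPING_ERROR;
}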
 arch/x86/kernel/pci-calgary_64.c | 24 
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index fda7867046d0..e75b490f2b0b 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -50,6 +50,8 @@
 #include 
 #include 
 
+#define CALGARY_MAPPING_ERROR  0
+
 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
 int use_calgary __read_mostly = 1;
 #else
@@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
-   return DMA_ERROR_CODE;
+   return CALGARY_MAPPING_ERROR;
}
}
 
@@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
entry = iommu_range_alloc(dev, tbl, npages);
 
-   if (unlikely(entry == DMA_ERROR_CODE)) {
+   if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
pr_warn("failed to allocate %u pages in iommu %p\n",
npages, tbl);
-   return DMA_ERROR_CODE;
+   return CALGARY_MAPPING_ERROR;
}
 
/* set the return dma address */
@@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
 
/* were we called with bad_dma_address? */
-   badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+   badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
   "address 0x%Lx\n", dma_addr);
@@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
 
entry = iommu_range_alloc(dev, tbl, npages);
-   if (entry == DMA_ERROR_CODE) {
+   if (entry == CALGARY_MAPPING_ERROR) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
 error:
calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
-   sg->dma_address = DMA_ERROR_CODE;
+   sg->dma_address = CALGARY_MAPPING_ERROR;
sg->dma_length = 0;
}
return 0;
@@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 
/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
-   if (mapping == DMA_ERROR_CODE)
+   if (mapping == CALGARY_MAPPING_ERROR)
goto free;
*dma_handle = mapping;
return ret;
@@ -478,6 +480,11 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
 }
 
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
 static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
@@ -485,6 +492,7 @@ static const struct dma_map_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
+   .mapping_error = calgary_mapping_error,
 };
 
 static inline void __iomem * busno_to_bbar(unsigned char num)
@@ -732,7 +740,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);
 
/* reserve EMERGENCY_PAGES from bad_dma_address and up */
-   iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+   iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
 
/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
-- 
2.11.0




[Xen-devel] [PATCH 17/44] hexagon: switch to use ->mapping_error for error reporting

2017-06-16 Thread Christoph Hellwig
Signed-off-by: Christoph Hellwig 
Acked-by: Richard Kuo 
---
 arch/hexagon/include/asm/dma-mapping.h |  2 --
 arch/hexagon/kernel/dma.c  | 12 +---
 arch/hexagon/kernel/hexagon_ksyms.c|  1 -
 3 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index d3a87bd9b686..00e3f10113b0 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@
 #include 
 
 struct device;
-extern int bad_dma_address;
-#define DMA_ERROR_CODE bad_dma_address
 
 extern const struct dma_map_ops *dma_ops;
 
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index e74b65009587..71269dc0f225 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -25,11 +25,11 @@
 #include 
 #include 
 
+#define HEXAGON_MAPPING_ERROR  0
+
 const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-int bad_dma_address;  /*  globals are automatically initialized to zero  */
-
 static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
 {
return phys_to_virt((unsigned long) dma_addr);
@@ -181,7 +181,7 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
 
if (!check_addr("map_single", dev, bus, size))
-   return bad_dma_address;
+   return HEXAGON_MAPPING_ERROR;
 
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync(dma_addr_to_virt(bus), size, dir);
@@ -203,6 +203,11 @@ static void hexagon_sync_single_for_device(struct device *dev,
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
 }
 
+static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == HEXAGON_MAPPING_ERROR;
+}
+
 const struct dma_map_ops hexagon_dma_ops = {
.alloc  = hexagon_dma_alloc_coherent,
.free   = hexagon_free_coherent,
@@ -210,6 +215,7 @@ const struct dma_map_ops hexagon_dma_ops = {
.map_page   = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
.sync_single_for_device = hexagon_sync_single_for_device,
+   .mapping_error  = hexagon_mapping_error,
.is_phys= 1,
 };
 
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 00bcad9cbd8f..aa248f595431 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -40,7 +40,6 @@ EXPORT_SYMBOL(memset);
 /* Additional variables */
 EXPORT_SYMBOL(__phys_offset);
 EXPORT_SYMBOL(_dflt_cache_att);
-EXPORT_SYMBOL(bad_dma_address);
 
 #define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
-- 
2.11.0




[Xen-devel] [PATCH 22/44] x86/pci-nommu: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig 
---
 arch/x86/kernel/pci-nommu.c | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a88952ef371c..085fe6ce4049 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -11,6 +11,8 @@
 #include 
 #include 
 
+#define NOMMU_MAPPING_ERROR  0
+
 static int
 check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
 {
@@ -33,7 +35,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
-   return DMA_ERROR_CODE;
+   return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
 }
@@ -88,6 +90,11 @@ static void nommu_sync_sg_for_device(struct device *dev,
flush_write_buffers();
 }
 
+static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == NOMMU_MAPPING_ERROR;
+}
+
 const struct dma_map_ops nommu_dma_ops = {
.alloc  = dma_generic_alloc_coherent,
.free   = dma_generic_free_coherent,
@@ -96,4 +103,5 @@ const struct dma_map_ops nommu_dma_ops = {
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys= 1,
+   .mapping_error  = nommu_mapping_error,
 };
-- 
2.11.0




[Xen-devel] [PATCH 21/44] powerpc: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
DMA_ERROR_CODE is going to go away, so don't rely on it.  Instead
define a ->mapping_error method for all IOMMU based dma operation
instances.  The direct ops don't ever return an error and don't
need a ->mapping_error method.

Signed-off-by: Christoph Hellwig 
Acked-by: Michael Ellerman 
---
 arch/powerpc/include/asm/dma-mapping.h |  4 
 arch/powerpc/include/asm/iommu.h   |  4 
 arch/powerpc/kernel/dma-iommu.c|  6 ++
 arch/powerpc/kernel/iommu.c| 28 ++--
 arch/powerpc/platforms/cell/iommu.c|  1 +
 arch/powerpc/platforms/pseries/vio.c   |  3 ++-
 6 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 181a095468e4..73aedbe6c977 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -17,10 +17,6 @@
 #include 
 #include 
 
-#ifdef CONFIG_PPC64
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
 dma_addr_t *dma_handle, gfp_t flag,
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 8a8ce220d7d0..20febe0b7f32 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -139,6 +139,8 @@ struct scatterlist;
 
 #ifdef CONFIG_PPC64
 
+#define IOMMU_MAPPING_ERROR  (~(dma_addr_t)0x0)
+
 static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base)
 {
@@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void)
 }
 #endif /* !CONFIG_IOMMU_API */
 
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
 #else
 
 static inline void *get_iommu_table_base(struct device *dev)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index fb7cbaa37658..8f7abf9baa63 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
 }
 
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == IOMMU_MAPPING_ERROR;
+}
+
 struct dma_map_ops dma_iommu_ops = {
.alloc  = dma_iommu_alloc_coherent,
.free   = dma_iommu_free_coherent,
@@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = {
.map_page   = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask  = dma_iommu_get_required_mask,
+   .mapping_error  = dma_iommu_mapping_error,
 };
 EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f2b724cd9e64..233ca3fe4754 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
-   return DMA_ERROR_CODE;
+   return IOMMU_MAPPING_ERROR;
}
 
if (should_fail_iommu(dev))
-   return DMA_ERROR_CODE;
+   return IOMMU_MAPPING_ERROR;
 
/*
 * We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
-   return DMA_ERROR_CODE;
+   return IOMMU_MAPPING_ERROR;
}
}
 
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
  unsigned long attrs)
 {
unsigned long entry;
-   dma_addr_t ret = DMA_ERROR_CODE;
+   dma_addr_t ret = IOMMU_MAPPING_ERROR;
int build_fail;
 
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 
-   if (unlikely(entry == DMA_ERROR_CODE))
-   return DMA_ERROR_CODE;
+   if (unlikely(entry == IOMMU_MAPPING_ERROR))
+   return IOMMU_MAPPING_ERROR;
 
entry += tbl->it_offset;/* Offset into real TCE table */
ret = entry << tbl->it_page_shift;  /* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 
/* tbl->it_ops->set() only returns non-zero for transient errors.
 * Clean up the table bitmap in this case and return
-* DMA_ERROR_CODE. For all other errors the functionality is
+* IOMMU_MAPPING_ERROR. For all other errors the functionality 

[Xen-devel] [PATCH 24/44] x86: remove DMA_ERROR_CODE

2017-06-16 Thread Christoph Hellwig
All dma_map_ops instances now handle their errors through
->mapping_error.

Signed-off-by: Christoph Hellwig 
---
 arch/x86/include/asm/dma-mapping.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 08a0838b83fb..c35d228aa381 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -19,8 +19,6 @@
 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
 #endif
 
-#define DMA_ERROR_CODE 0
-
 extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
-- 
2.11.0




[Xen-devel] [PATCH 25/44] arm: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig 
---
 arch/arm/common/dmabounce.c| 13 +---
 arch/arm/include/asm/dma-iommu.h   |  2 ++
 arch/arm/include/asm/dma-mapping.h |  1 -
 arch/arm/mm/dma-mapping.c  | 41 --
 4 files changed, 38 insertions(+), 19 deletions(-)

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9b1b7be2ec0e..4060378e0f14 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,6 +33,7 @@
 #include 
 
 #include 
+#include <asm/dma-iommu.h>
 
 #undef STATS
 
@@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
   __func__, ptr);
-   return DMA_ERROR_CODE;
+   return ARM_MAPPING_ERROR;
}
 
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
-   return DMA_ERROR_CODE;
+   return ARM_MAPPING_ERROR;
 
if (ret == 0) {
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 
if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-   return DMA_ERROR_CODE;
+   return ARM_MAPPING_ERROR;
}
 
return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -452,6 +453,11 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }
 
+static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return arm_dma_ops.mapping_error(dev, dma_addr);
+}
+
 static const struct dma_map_ops dmabounce_ops = {
.alloc  = arm_dma_alloc,
.free   = arm_dma_free,
@@ -466,6 +472,7 @@ static const struct dma_map_ops dmabounce_ops = {
.sync_sg_for_cpu= arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.set_dma_mask   = dmabounce_set_mask,
+   .mapping_error  = dmabounce_mapping_error,
 };
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f96651..389a26a10ea3 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,6 +9,8 @@
 #include 
 #include 
 
+#define ARM_MAPPING_ERROR  (~(dma_addr_t)0x0)
+
 struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 680d3f3889e7..52a8fd5a8edb 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -12,7 +12,6 @@
 #include 
 #include 
 
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 extern const struct dma_map_ops arm_dma_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bd83c531828a..8f2c5a8a98f0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == ARM_MAPPING_ERROR;
+}
+
 const struct dma_map_ops arm_dma_ops = {
.alloc  = arm_dma_alloc,
.free   = arm_dma_free,
@@ -193,6 +198,7 @@ const struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu= arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
+   .mapping_error  = arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
@@ -211,6 +217,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable= arm_dma_get_sgtable,
.map_page   = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
+   .mapping_error  = arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
@@ -799,7 +806,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp &= ~(__GFP_COMP);
args.gfp = gfp;
 
-   *handle = DMA_ERROR_CODE;
+   *handle = ARM_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? 

[Xen-devel] [PATCH 18/44] iommu/amd: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig 
---
 drivers/iommu/amd_iommu.c | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 63cacf5d6cf2..d41280e869de 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -54,6 +54,8 @@
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
 
+#define AMD_IOMMU_MAPPING_ERROR  0
+
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
 #define LOOP_TIMEOUT   100000
@@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev,
paddr &= PAGE_MASK;
 
address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
-   if (address == DMA_ERROR_CODE)
+   if (address == AMD_IOMMU_MAPPING_ERROR)
goto out;
 
prot = dir2prot(direction);
@@ -2431,7 +2433,7 @@ static dma_addr_t __map_single(struct device *dev,
 
dma_ops_free_iova(dma_dom, address, pages);
 
-   return DMA_ERROR_CODE;
+   return AMD_IOMMU_MAPPING_ERROR;
 }
 
 /*
@@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
else if (IS_ERR(domain))
-   return DMA_ERROR_CODE;
+   return AMD_IOMMU_MAPPING_ERROR;
 
dma_mask = *dev->dma_mask;
dma_dom = to_dma_ops_domain(domain);
@@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
 
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
-   if (address == DMA_ERROR_CODE)
+   if (address == AMD_IOMMU_MAPPING_ERROR)
goto out_err;
 
prot = dir2prot(direction);
@@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
 size, DMA_BIDIRECTIONAL, dma_mask);
 
-   if (*dma_addr == DMA_ERROR_CODE)
+   if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
goto out_free;
 
return page_address(page);
@@ -2732,6 +2734,11 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
return check_device(dev);
 }
 
+static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == AMD_IOMMU_MAPPING_ERROR;
+}
+
 static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc  = alloc_coherent,
.free   = free_coherent,
@@ -2740,6 +2747,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
.map_sg = map_sg,
.unmap_sg   = unmap_sg,
.dma_supported  = amd_iommu_dma_supported,
+   .mapping_error  = amd_iommu_mapping_error,
 };
 
 static int init_reserved_iova_ranges(void)
-- 
2.11.0




[Xen-devel] [PATCH 26/44] dma-mapping: remove DMA_ERROR_CODE

2017-06-16 Thread Christoph Hellwig
And update the documentation - dma_mapping_error has been supported
everywhere for a long time.

Signed-off-by: Christoph Hellwig 
---
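For contrast with the incorrect examples deleted below, the portable
pattern (a sketch mirroring the correct example the HOWTO already gives)
is simply:

	dma_addr_t dma_handle;

	dma_handle = dma_map_single(dev, addr, size, direction);
	if (dma_mapping_error(dev, dma_handle)) {
		/* reduce DMA usage, retry later, or reset the driver */
		goto map_error_handling;
	}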
 Documentation/DMA-API-HOWTO.txt | 31 +--
 include/linux/dma-mapping.h |  5 -
 2 files changed, 5 insertions(+), 31 deletions(-)

diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 979228bc9035..4ed388356898 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -550,32 +550,11 @@ and to unmap it:
dma_unmap_single(dev, dma_handle, size, direction);
 
 You should call dma_mapping_error() as dma_map_single() could fail and return
-error. Not all DMA implementations support the dma_mapping_error() interface.
-However, it is a good practice to call dma_mapping_error() interface, which
-will invoke the generic mapping error check interface. Doing so will ensure
-that the mapping code will work correctly on all DMA implementations without
-any dependency on the specifics of the underlying implementation. Using the
-returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. A couple of examples of incorrect ways
-to check for errors that make assumptions about the underlying DMA
-implementation are as follows and these are applicable to dma_map_page() as
-well.
-
-Incorrect example 1:
-   dma_addr_t dma_handle;
-
-   dma_handle = dma_map_single(dev, addr, size, direction);
-   if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) {
-   goto map_error;
-   }
-
-Incorrect example 2:
-   dma_addr_t dma_handle;
-
-   dma_handle = dma_map_single(dev, addr, size, direction);
-   if (dma_handle == DMA_ERROR_CODE) {
-   goto map_error;
-   }
+error.  Doing so will ensure that the mapping code will work correctly on all
+DMA implementations without any dependency on the specifics of the underlying
+implementation. Using the returned address without checking for errors could
+result in failures ranging from panics to silent data corruption.  The same
+applies to dma_map_page() as well.
 
 You should call dma_unmap_single() when the DMA activity is finished, e.g.,
 from the interrupt which told you that the DMA transfer is done.
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4f3eecedca2d..a57875309bfd 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -546,12 +546,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
if (get_dma_ops(dev)->mapping_error)
return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-
-#ifdef DMA_ERROR_CODE
-   return dma_addr == DMA_ERROR_CODE;
-#else
return 0;
-#endif
 }
 
 #ifndef HAVE_ARCH_DMA_SUPPORTED
-- 
2.11.0




[Xen-devel] [PATCH 16/44] arm64: remove DMA_ERROR_CODE

2017-06-16 Thread Christoph Hellwig
The dma alloc interface returns an error by returning NULL, and the
mapping interfaces rely on the mapping_error method, which the dummy
ops already implement correctly.

Thus remove the DMA_ERROR_CODE define.

Signed-off-by: Christoph Hellwig 
Reviewed-by: Robin Murphy 
---
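For reference, the dummy ops' error reporting that makes the define
unnecessary looks roughly like this (a sketch of the existing code in
the same file):

/* sketch: the dummy ops flag every mapping as failed via
 * ->mapping_error, so no DMA_ERROR_CODE sentinel is ever consulted */
static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}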
 arch/arm64/include/asm/dma-mapping.h | 1 -
 arch/arm64/mm/dma-mapping.c  | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 5392dbeffa45..cf8fc8f05580 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,7 +24,6 @@
 #include 
 #include 
 
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern const struct dma_map_ops dummy_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3216e098c058..147fbb907a2f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -184,7 +184,6 @@ static void *__dma_alloc(struct device *dev, size_t size,
 no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
 no_mem:
-   *dma_handle = DMA_ERROR_CODE;
return NULL;
 }
 
@@ -487,7 +486,7 @@ static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
   enum dma_data_direction dir,
   unsigned long attrs)
 {
-   return DMA_ERROR_CODE;
+   return 0;
 }
 
 static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
-- 
2.11.0




[Xen-devel] [PATCH 19/44] s390: implement ->mapping_error

2017-06-16 Thread Christoph Hellwig
s390 can also use noop_dma_ops, and while that currently does not return
errors, it will do so in the future.  Implementing the mapping_error method
is the proper way to have per-ops error conditions.

Signed-off-by: Christoph Hellwig 
Acked-by: Gerald Schaefer 
---
 arch/s390/include/asm/dma-mapping.h |  2 --
 arch/s390/pci/pci_dma.c | 18 +-
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3108b8dbe266..512ad0eaa11a 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
 #include 
 #include 
 
-#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-
 extern const struct dma_map_ops s390_pci_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 9081a57fa340..ea623faab525 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -14,6 +14,8 @@
 #include 
 #include 
 
+#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
+
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
 static int s390_iommu_strict;
@@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 
 out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-   return DMA_ERROR_CODE;
+   return S390_MAPPING_ERROR;
 }
 
 static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
dma_addr = dma_alloc_address(dev, nr_pages);
-   if (dma_addr == DMA_ERROR_CODE) {
+   if (dma_addr == S390_MAPPING_ERROR) {
ret = -ENOSPC;
goto out_err;
}
@@ -352,7 +354,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, 
struct page *page,
 out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
-   return DMA_ERROR_CODE;
+   return S390_MAPPING_ERROR;
 }
 
 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int ret;
 
dma_addr_base = dma_alloc_address(dev, nr_pages);
-   if (dma_addr_base == DMA_ERROR_CODE)
+   if (dma_addr_base == S390_MAPPING_ERROR)
return -ENOMEM;
 
dma_addr = dma_addr_base;
@@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
 
-   s->dma_address = DMA_ERROR_CODE;
+   s->dma_address = S390_MAPPING_ERROR;
s->dma_length = 0;
 
if (s->offset || (size & ~PAGE_MASK) ||
@@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
 }
+   
+static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+   return dma_addr == S390_MAPPING_ERROR;
+}
 
 int zpci_dma_init_device(struct zpci_dev *zdev)
 {
@@ -657,6 +664,7 @@ const struct dma_map_ops s390_pci_dma_ops = {
.unmap_sg   = s390_dma_unmap_sg,
.map_page   = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
+   .mapping_error  = s390_mapping_error,
/* if we support direct DMA this must be conditional */
.is_phys= 0,
/* dma_supported is unconditionally true without a callback */
-- 
2.11.0



