[PATCH] target/user: Fix possible overwrite of t_data_sg's last iov[]

2017-02-27 Thread lixiubo
From: Xiubo Li 

If there is BIDI data, its first iov[] will overwrite the last
iov[] for se_cmd->t_data_sg.

To fix this, we can just increase the iov pointer, but this may
introduce a new memory leak: if se_cmd->data_length and
se_cmd->t_bidi_data_sg->length are both not aligned up to
DATA_BLOCK_SIZE, the actual length needed may be larger than just
the sum of them.

So this can be avoided by rounding all the data lengths up to
DATA_BLOCK_SIZE.
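
For example, assuming DATA_BLOCK_SIZE is 4 KiB: a 1 KiB t_data_sg plus a
1 KiB t_bidi_data_sg each still occupy a whole 4 KiB block in the data
area, so 8 KiB must be reserved even though the plain sum of the two
lengths is only 2 KiB.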

Signed-off-by: Xiubo Li 
---
 drivers/target/target_core_user.c | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/target/target_core_user.c 
b/drivers/target/target_core_user.c
index 2e33100..59a18fd 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -429,10 +429,11 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, 
size_t cmd_size, size_t d
 
mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-   data_length = se_cmd->data_length;
+   data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-   data_length += se_cmd->t_bidi_data_sg->length;
+   data_length += round_up(se_cmd->t_bidi_data_sg->length,
+   DATA_BLOCK_SIZE);
}
if ((command_size > (udev->cmdr_size / 2)) ||
data_length > udev->data_size) {
@@ -503,10 +504,14 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, 
size_t cmd_size, size_t d
entry->req.iov_dif_cnt = 0;
 
/* Handle BIDI commands */
-   iov_cnt = 0;
-   alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-   se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-   entry->req.iov_bidi_cnt = iov_cnt;
+   if (se_cmd->se_cmd_flags & SCF_BIDI) {
+   iov_cnt = 0;
+   iov++;
+   alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+   se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+   false);
+   entry->req.iov_bidi_cnt = iov_cnt;
+   }
 
/* cmd's data_bitmap is what changed in process */
bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
-- 
1.8.3.1





[scsi:misc 40/55] arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct refcount_struct}' has no member named 'counter'

2017-02-27 Thread kbuild test robot
tree:   https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git misc
head:   2559a1ef688f933835912c731bed2254146a9b04
commit: 61d8658b4a435eac729966cc94cdda077a8df5cd [40/55] scsi: qedf: Add QLogic 
FastLinQ offload FCoE driver framework.
config: alpha-allyesconfig (attached as .config)
compiler: alpha-linux-gnu-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout 61d8658b4a435eac729966cc94cdda077a8df5cd
# save the attached .config to linux build tree
make.cross ARCH=alpha 

Note: the scsi/misc HEAD 2559a1ef688f933835912c731bed2254146a9b04 builds fine.
  It only hurts bisectibility.

All errors (new ones prefixed by >>):

   In file included from include/linux/linkage.h:4:0,
from include/linux/preempt.h:9,
from include/linux/spinlock.h:50,
from drivers/scsi/qedf/qedf_io.c:9:
   drivers/scsi/qedf/qedf_io.c: In function 'qedf_trace_io':
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:309:17: note: in definition of macro '__READ_ONCE'
 union { typeof(x) __val; char __c[1]; } __u;   \
^
   arch/alpha/include/asm/atomic.h:20:25: note: in expansion of macro 
'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:311:22: note: in definition of macro '__READ_ONCE'
  __read_once_size(&(x), __u.__c, sizeof(x));  \
 ^
   arch/alpha/include/asm/atomic.h:20:25: note: in expansion of macro 
'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:311:42: note: in definition of macro '__READ_ONCE'
  __read_once_size(&(x), __u.__c, sizeof(x));  \
 ^
   arch/alpha/include/asm/atomic.h:20:25: note: in expansion of macro 
'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:313:30: note: in definition of macro '__READ_ONCE'
  __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
 ^
   arch/alpha/include/asm/atomic.h:20:25: note: in expansion of macro 
'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:313:50: note: in definition of macro '__READ_ONCE'
  __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
 ^
   arch/alpha/include/asm/atomic.h:20:25: note: in expansion of macro 
'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
   drivers/scsi/qedf/qedf_io.c: In function 'qedf_scsi_completion':
>> arch/alpha/include/asm/atomic.h:20:38: error: 'refcount_t {aka struct 
>> refcount_struct}' 
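
The error itself comes from qedf reading the refcount_t embedded in a kref
with atomic_read(). A minimal sketch of one way to avoid it, assuming
io_req->refcount is a struct kref as the call site suggests, is to use the
kref_read() helper instead of reaching into its internals:

	/* was: io_log->refcount = atomic_read(&io_req->refcount.refcount); */
	io_log->refcount = kref_read(&io_req->refcount);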

Re: [PATCH v2] scsi: ufs: Factor out ufshcd_read_desc_param

2017-02-27 Thread Martin K. Petersen
> "Michal" == Potomski, MichalX  writes:

Michal> Since in UFS 2.1 specification some of the descriptor lengths
Michal> differs from 2.0 specification and some devices, which are
Michal> reporting spec version 2.0 have different descriptor lengths we
Michal> can not rely on hardcoded values taken from 2.0
Michal> specification. This patch introduces reading these lengths per
Michal> each device from descriptor headers at probe time to ensure
Michal> their correctness.

Subhash: I assume v2 still carries your Reviewed-by:?

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] scsi: aacraid: remove redundant zero check on ret

2017-02-27 Thread Martin K. Petersen
> "Colin" == Colin King  writes:

Colin,

Colin> The check for ret being zero is redundant as a few statements
Colin> earlier we break out of the while loop if ret is non-zero.  Thus
Colin> we can remove the zero check and also the dead-code non-zero case
Colin> too.

Applied to 4.11/scsi-fixes.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] sd: close hole in > 2T device rejection when !CONFIG_LBDAF

2017-02-27 Thread Martin K. Petersen
> "Bart" == Bart Van Assche  writes:

Bart,

Bart> Sorry but I still don't understand why the two checks are
Bart> different. How about the (untested) patch below? The approach
Bart> below avoids that the check is duplicated and - at least in my
Bart> opinion - results in code that is easier to read.

I'll take a closer look at your patch tomorrow. I am sympathetic to
having a sanity check helper function. That would also give us a single
place to filter out crackpot values reported by USB doodads.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] qedi: Fix memory leak in tmf response processing.

2017-02-27 Thread Martin K. Petersen
> "Manish" == Manish Rangankar  writes:

Manish,

Applied to 4.11/scsi-fixes.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] scsi: lpfc: use div_u64 for 64-bit division

2017-02-27 Thread Martin K. Petersen
> "Arnd" == Arnd Bergmann  writes:

Arnd> The new debugfs output causes a link error on 32-bit
Arnd> architectures: ERROR: "__aeabi_uldivmod"
Arnd> [drivers/scsi/lpfc/lpfc.ko] undefined!

Arnd> This code is not performance critical, so we can simply use
Arnd> div_u64().

Applied to 4.11/scsi-fixes.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] scsi: lpfc: use proper format string for dma_addr_t

2017-02-27 Thread Martin K. Petersen
> "Arnd" == Arnd Bergmann  writes:

Arnd> dma_addr_t may be either u32 or u64, depending on the kernel
Arnd> configuration, and we get a warning for the 32-bit case:

Applied to 4.11/scsi-fixes.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH] scsi_error: count medium access timeout only once per EH run

2017-02-27 Thread Martin K. Petersen
> "Ewan" == Ewan D Milne  writes:

Ewan,

Ewan> So, this is good, the current implementation has a flaw in that
Ewan> under certain conditions, a device will get offlined immediately,
Ewan> (i.e. if there are a few medium access commands pending, and they
Ewan> all timeout), which isn't what was intended.

Yeah. That was OK for my use case. I was trying to prevent the server
from going into a tail spin. There was no chance of recovering the disk.

But ideally we'd be offlining based on how many times we retry the same
medium access command.

Ewan> as separate medium access timeouts, but I think the original
Ewan> intent of Martin's change wasn't to operate on such a short
Ewan> time-scale, am I right, Martin?

On the device that begat my original patch, SPC command responses were
handled by the SAS controller firmware on behalf of all discovered
devices. Regardless of whether said drives were still alive or not.

Medium Access commands, however, would always get passed on to the
physical drive for processing. So when a drive went pining for the
fjords, TUR would always succeed whereas reads and writes would time
out.

-- 
Martin K. Petersen  Oracle Linux Engineering


Re: [PATCH 2/2] Add S6 support

2017-02-27 Thread Martin K. Petersen
> "Charles" == Charles Chiou  writes:

Charles,

Charles> Hi all, Ping?  Does this patch has any issue need to fix? Thank
Charles> you.

I'm hoping somebody will review it soon.

However, we're in the middle of the 4.11 merge window so people are
mostly focused on regressions. I expect to commence merging code for
4.12 sometime next week.

-- 
Martin K. Petersen  Oracle Linux Engineering


[scsi:misc 40/55] arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct refcount_struct}' has no member named 'counter'

2017-02-27 Thread kbuild test robot
tree:   https://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git misc
head:   2559a1ef688f933835912c731bed2254146a9b04
commit: 61d8658b4a435eac729966cc94cdda077a8df5cd [40/55] scsi: qedf: Add QLogic 
FastLinQ offload FCoE driver framework.
config: ia64-allmodconfig (attached as .config)
compiler: ia64-linux-gcc (GCC) 6.2.0
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout 61d8658b4a435eac729966cc94cdda077a8df5cd
# save the attached .config to linux build tree
make.cross ARCH=ia64 

Note: the scsi/misc HEAD 2559a1ef688f933835912c731bed2254146a9b04 builds fine.
  It only hurts bisectibility.

All errors (new ones prefixed by >>):

   In file included from include/linux/linkage.h:4:0,
from include/linux/preempt.h:9,
from include/linux/spinlock.h:50,
from drivers/scsi/qedf/qedf_io.c:9:
   drivers/scsi/qedf/qedf_io.c: In function 'qedf_trace_io':
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:309:17: note: in definition of macro '__READ_ONCE'
 union { typeof(x) __val; char __c[1]; } __u;   \
^
   arch/ia64/include/asm/atomic.h:24:25: note: in expansion of macro 'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:311:22: note: in definition of macro '__READ_ONCE'
  __read_once_size(&(x), __u.__c, sizeof(x));  \
 ^
   arch/ia64/include/asm/atomic.h:24:25: note: in expansion of macro 'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:311:42: note: in definition of macro '__READ_ONCE'
  __read_once_size(&(x), __u.__c, sizeof(x));  \
 ^
   arch/ia64/include/asm/atomic.h:24:25: note: in expansion of macro 'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:313:30: note: in definition of macro '__READ_ONCE'
  __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
 ^
   arch/ia64/include/asm/atomic.h:24:25: note: in expansion of macro 'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define atomic_read(v)  READ_ONCE((v)->counter)
 ^
   include/linux/compiler.h:313:50: note: in definition of macro '__READ_ONCE'
  __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
 ^
   arch/ia64/include/asm/atomic.h:24:25: note: in expansion of macro 'READ_ONCE'
#define atomic_read(v)  READ_ONCE((v)->counter)
^
   drivers/scsi/qedf/qedf_io.c:1001:21: note: in expansion of macro 
'atomic_read'
 io_log->refcount = atomic_read(_req->refcount.refcount);
^~~
   drivers/scsi/qedf/qedf_io.c: In function 'qedf_scsi_completion':
>> arch/ia64/include/asm/atomic.h:24:38: error: 'refcount_t {aka struct 
>> refcount_struct}' has no member named 'counter'
#define 

Re: SCSI regression in 4.11

2017-02-27 Thread Jens Axboe
On 02/27/2017 06:19 PM, Stephen Hemminger wrote:
> On Mon, 27 Feb 2017 15:30:30 -0800
> Stephen Hemminger  wrote:
> 
>> Something in SCSI in 4.11 broke booting on Hyper-V Generation 2 VM with 8 
>> VCPU and 4G of memory.
>> Both Linus's current tree (4.11 pre-rc1) and linux-next fail in a similar 
>> manner. It looks like some error
>> in SCSI device detection because there is only a single device.
>>
>> The offending commit causing the regression is:
>>
>> $ git bisect bad
>> e9c787e65c0c36529745be47d490d998b4b6e589 is the first bad commit
>> commit e9c787e65c0c36529745be47d490d998b4b6e589
>> Author: Christoph Hellwig 
>> Date:   Mon Jan 2 21:55:26 2017 +0300
>>
>> scsi: allocate scsi_cmnd structures as part of struct request
>> 
>> Rely on the new block layer functionality to allocate additional driver
>> specific data behind struct request instead of implementing it in SCSI
>> itself.
>> 
>> Signed-off-by: Christoph Hellwig 
>> Acked-by: Martin K. Petersen 
>> Reviewed-by: Hannes Reinecke 
>> Signed-off-by: Jens Axboe 
>>
>> :04 04 6ff016fcdae227efeb19c1c301b17ccd7ea35da6 
>> 70d79f99d9b79ecf4dccbe067fc697219f5c78da M   drivers
>> :04 04 a672ff52df8b2c211b3f98cae4a88d8a96ccde0b 
>> 1aaaed7de0994f597c7f8290c722a0b4a7789429 M   include
>>
>> I checked and tree is current and up to date and includes 
>> commit ee5242360424b9b967454e9183767323d10cf985
>> Author: Christoph Hellwig 
>> Date:   Tue Feb 21 10:04:55 2017 +0100
>>
>> scsi: zero per-cmd driver data before each I/O
>>
>> Kernel config is attached. It started with Ubuntu config, but then did 
>> localmodconfig and pruned
>> out unnecessary stuff.
>>
> 
> This problem I am seeing looks like the one addressed by:
> 
> Fixes: ee5242360424 ("scsi: zero per-cmd driver data before each I/O")
> 
> but that is already in linux-next.
> 
> Noticed another place where memset() of the data was being done but not the
> extra bits.
> Tried this, but didn't fix it either...

Yeah, that fix is already in Linus's tree. But it does look like a replica of
what Dexuan reported. Out of curiosity, does it boot if you enable
CONFIG_SCSI_MQ_DEFAULT?

Christoph, looks like the previous fix wasn't complete...

-- 
Jens Axboe



Re: [PATCH] target/user: Add daynmic growing data area feature support

2017-02-27 Thread Xiubo Li

Hi Mike

Thanks very much for your work and test cases.



From: Xiubo Li 

Currently for the TCMU, the ring buffer size is fixed to 64K cmd
area + 1M data area, and this will be bottlenecks for high iops.

Hi Xiubo, thanks for your work.

daynmic -> dynamic

Have you benchmarked this patch and determined what kind of iops
improvement it allows? Do you see the data area reaching its
fully-allocated size?


I tested this patch with Venky's tcmu-runner rbd aio patches, with one
10 gig iscsi session, and for pretty basic fio direct io (64-256K
read/writes with a queue depth of 64, numjobs between 1 and 4) tests read
throughput goes from about 80 to 500 MB/s.

Looks nice.


Write throughput is pretty
low at around 150 MB/s.

What's the original write throughput without this patch? Is it also
around 80 MB/s ?


I did not hit the fully allocated size. I did not drive a lot of IO though.

Yes, if the cmd area won't hit its fully allocated size, it is also very
hard for the data area to hit its fully allocated size.

And the 64MB cmd area is a little large for a 1GB data area.

Next, I will shrink it as needed.

Thanks,

BRs
Xiubo






Re: [Lsf-pc] [LSF/MM TOPIC] do we really need PG_error at all?

2017-02-27 Thread Jeff Layton
On Tue, 2017-02-28 at 10:32 +1100, NeilBrown wrote:
> On Mon, Feb 27 2017, Andreas Dilger wrote:
> 
> > 
> > My thought is that PG_error is definitely useful for applications to get
> > correct errors back when doing write()/sync_file_range() so that they know
> > there is an error in the data that _they_ wrote, rather than receiving an
> > error for data that may have been written by another thread, and in turn
> > clearing the error from another thread so it *doesn't* know it had a write
> > error.
> 
> It might be useful in that way, but it is not currently used that way.
> Such usage would be a change in visible behaviour.
> 
> sync_file_range() calls filemap_fdatawait_range(), which calls
> filemap_check_errors().
> If there have been any errors in the file recently, inside or outside
> the range, the latter will return an error which will propagate up.
> 
> > 
> > As for stray sync() clearing PG_error from underneath an application, that
> > shouldn't happen since filemap_fdatawait_keep_errors() doesn't clear errors
> > and is used by device flushing code (fdatawait_one_bdev(), 
> > wait_sb_inodes()).
> 
> filemap_fdatawait_keep_errors() calls __filemap_fdatawait_range() which
> clears PG_error on every page.
> What it doesn't do is call filemap_check_errors(), and so doesn't clear
> AS_ENOSPC or AS_EIO.
> 
> 

I think it's helpful to get a clear idea of what happens now in the face
of errors and what we expect to happen, and I don't quite have that yet:

--8<-
void page_endio(struct page *page, bool is_write, int err)
{
if (!is_write) {
if (!err) {
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
unlock_page(page);
} else {
if (err) {
SetPageError(page);
if (page->mapping)
mapping_set_error(page->mapping, err);
}
end_page_writeback(page);
}
}
--8<-

...not everything uses page_endio, but it's a good place to look since
we have both flavors of error handling in one place.

On a write error, we SetPageError and set the error in the mapping.

What I'm not clear on is:

1) what happens to the page at that point when we get a writeback error?
Does it just remain in-core and is allowed to service reads (assuming
that it was uptodate before)?

Can I redirty it and have it retry the write? Is there standard behavior
for this or is it just up to the whim of the filesystem?

I'll probably have questions about the read side as well, but for now it
looks like it's mostly used in an ad-hoc way to communicate errors
across subsystems (block to fs layer, for instance).
--
Jeff Layton 


Re: SCSI regression in 4.11

2017-02-27 Thread Stephen Hemminger
On Mon, 27 Feb 2017 15:30:30 -0800
Stephen Hemminger  wrote:

> Something in SCSI in 4.11 broke booting on Hyper-V Generation 2 VM with 8 
> VCPU and 4G of memory.
> Both Linus's current tree (4.11 pre-rc1) and linux-next fail in a similar 
> manner. It looks like some error
> in SCSI device detection because there is only a single device.
> 
> The offending commit causing the regression is:
> 
> $ git bisect bad
> e9c787e65c0c36529745be47d490d998b4b6e589 is the first bad commit
> commit e9c787e65c0c36529745be47d490d998b4b6e589
> Author: Christoph Hellwig 
> Date:   Mon Jan 2 21:55:26 2017 +0300
> 
> scsi: allocate scsi_cmnd structures as part of struct request
> 
> Rely on the new block layer functionality to allocate additional driver
> specific data behind struct request instead of implementing it in SCSI
> itself.
> 
> Signed-off-by: Christoph Hellwig 
> Acked-by: Martin K. Petersen 
> Reviewed-by: Hannes Reinecke 
> Signed-off-by: Jens Axboe 
> 
> :04 04 6ff016fcdae227efeb19c1c301b17ccd7ea35da6 
> 70d79f99d9b79ecf4dccbe067fc697219f5c78da Mdrivers
> :04 04 a672ff52df8b2c211b3f98cae4a88d8a96ccde0b 
> 1aaaed7de0994f597c7f8290c722a0b4a7789429 Minclude
> 
> I checked and tree is current and up to date and includes 
> commit ee5242360424b9b967454e9183767323d10cf985
> Author: Christoph Hellwig 
> Date:   Tue Feb 21 10:04:55 2017 +0100
> 
> scsi: zero per-cmd driver data before each I/O
> 
> Kernel config is attached. It started with Ubuntu config, but then did 
> localmodconfig and pruned
> out unnecessary stuff.
> 

This problem I am seeing looks like the one addressed by:

Fixes: ee5242360424 ("scsi: zero per-cmd driver data before each I/O")

but that is already in linux-next.

Noticed another place where memset() of the data was being done but not the
extra bits.
Tried this, but didn't fix it either...


diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba2286652ff6..7e0463e78ff4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1850,7 +1850,7 @@ static int scsi_mq_prep_fn(struct request *req)
 
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
-   sizeof(*cmd) - sizeof(cmd->req));
+   sizeof(*cmd) - sizeof(cmd->req) + sdev->host->hostt->cmd_size);
 
req->special = cmd;
 


Re: [LSF/MM TOPIC] do we really need PG_error at all?

2017-02-27 Thread Jeff Layton
On Mon, 2017-02-27 at 15:51 -0700, Andreas Dilger wrote:
> On Feb 27, 2017, at 8:07 AM, Jeff Layton  wrote:
> > 
> > On Mon, 2017-02-27 at 11:27 +1100, NeilBrown wrote:
> > > On Sun, Feb 26 2017, James Bottomley wrote:
> > > 
> > > > On Mon, 2017-02-27 at 08:03 +1100, NeilBrown wrote:
> > > > > On Sun, Feb 26 2017, James Bottomley wrote:
> > > > > 
> > > > > > [added linux-scsi and linux-block because this is part of our error
> > > > > > handling as well]
> > > > > > On Sun, 2017-02-26 at 09:42 -0500, Jeff Layton wrote:
> > > > > > > Proposing this as a LSF/MM TOPIC, but it may turn out to be me
> > > > > > > just not understanding the semantics here.
> > > > > > > 
> > > > > > > As I was looking into -ENOSPC handling in cephfs, I noticed that
> > > > > > > PG_error is only ever tested in one place [1]
> > > > > > > __filemap_fdatawait_range, which does this:
> > > > > > > 
> > > > > > >   if (TestClearPageError(page))
> > > > > > >   ret = -EIO;
> > > > > > > 
> > > > > > > This error code will override any AS_* error that was set in the
> > > > > > > mapping. Which makes me wonder...why don't we just set this error
> > > > > > > in the mapping and not bother with a per-page flag? Could we
> > > > > > > potentially free up a page flag by eliminating this?
> > > > > > 
> > > > > > Note that currently the AS_* codes are only set for write errors
> > > > > > not for reads and we have no mapping error handling at all for swap
> > > > > > pages, but I'm sure this is fixable.
> > > > > 
> > > > > How is a read error different from a failure to set PG_uptodate?
> > > > > Does PG_error suppress retries?
> > > > 
> > > > We don't do any retries in the code above the block layer (or at least
> > > > we shouldn't).
> > > 
> > > I was wondering about what would/should happen if a read request was
> > > re-issued for some reason.  Should the error flag on the page cause an
> > > immediate failure, or should it try again.
> > > If read-ahead sees a read-error on some future page, is it necessary to
> > > record the error so subsequent read-aheads don't notice the page is
> > > missing and repeatedly try to re-load it?
> > > When the application eventually gets to the faulty page, should a read
> > > be tried then, or is the read-ahead failure permanent?
> > > 
> > > 
> > > 
> > > > 
> > > > > > 
> > > > > > From the I/O layer point of view we take great pains to try to
> > > > > > pinpoint the error exactly to the sector.  We reflect this up by
> > > > > > setting the PG_error flag on the page where the error occurred.  If
> > > > > > we only set the error on the mapping, we lose that granularity,
> > > > > > because the mapping is mostly at the file level (or VMA level for
> > > > > > anon pages).
> > > > > 
> > > > > Are you saying that the IO layer finds the page in the bi_io_vec and
> > > > > explicitly sets PG_error,
> > > > 
> > > > I didn't say anything about the mechanism.  I think the function you're
> > > > looking for is fs/mpage.c:mpage_end_io().  layers below block indicate
> > > > the position in the request.  Block maps the position to bio and the
> > > > bio completion maps to page.  So the actual granularity seen in the
> > > > upper layer depends on how the page to bio mapping is done.
> > > 
> > > If the block layer is just returning the status at a per-bio level (which
> > > makes perfect sense), then this has nothing directly to do with the
> > > PG_error flag.
> > > 
> > > The page cache needs to do something with bi_error, but it isn't
> > > immediately clear that it needs to set PG_error.
> > > 
> > > > :q
> > > > > rather than just passing an error indication to bi_end_io ??  That
> > > > > would seem to be wrong as the page may not be in the page cache.
> > > > 
> > > > Usually pages in the mpage_end_io path are pinned, I think.
> > > > 
> > > > > So I guess I misunderstand you.
> > > > > 
> > > > > > 
> > > > > > So I think the question for filesystem people from us would be do
> > > > > > you care about this accuracy?  If it's OK just to know an error
> > > > > > occurred somewhere in this file, then perhaps we don't need it.
> > > > > 
> > > > > I had always assumed that a bio would either succeed or fail, and
> > > > > that no finer granularity could be available.
> > > > 
> > > > It does ... but a bio can be as small as a single page.
> > > > 
> > > > > I think the question here is: Do filesystems need the pagecache to
> > > > > record which pages have seen an IO error?
> > > > 
> > > > It's not just filesystems.  The partition code uses PageError() ... the
> > > > metadata code might as well (those are things with no mapping).  I'm
> > > > not saying we can't remove PG_error; I am saying it's not going to be
> > > > quite as simple as using the AS_ flags.
> > > 
> > > The partition code could use PageUptodate().
> > > mpage_end_io() calls page_endio() on each page, and on read error that
> > > calls:
> > > 
> > >   ClearPageUptodate(page);
> 

Re: [PATCHv2 5/5] scsi: make asynchronous aborts mandatory

2017-02-27 Thread Bart Van Assche
On Wed, 2017-02-22 at 17:07 +0100, Hannes Reinecke wrote:
> There hasn't been any reports for HBAs where asynchronous abort
> would not work, so we should make it mandatory and remove
> the fallback.

Reviewed-by: Bart Van Assche 

[PATCH v2] libiscsi: add lock around task lists to fix list corruption regression

2017-02-27 Thread Chris Leech
There's a rather long standing regression from the commit
"libiscsi: Reduce locking contention in fast path"

Depending on iSCSI target behavior, it's possible to hit the case in
iscsi_complete_task where the task is still on a pending list
(!list_empty(&task->running)).  When that happens the task is removed
from the list while holding the session back_lock, but other task list
modifications occur under the frwd_lock.  That leads to linked list
corruption and eventually a panicked system.

Rather than back out the session lock split entirely, in order to try
to keep some of the performance gains, this patch adds another lock to
maintain the integrity of the task lists.
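
In practice that means every task-list manipulation is bracketed by the new
lock, regardless of which session lock the caller already holds. Roughly
(a sketch of the pattern only; see the diff below for the real call sites):

	spin_lock_bh(&conn->taskqueuelock);
	if (!list_empty(&task->running))
		list_del_init(&task->running);
	spin_unlock_bh(&conn->taskqueuelock);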

Major enterprise supported kernels have been backing out the lock split
for a while now, thanks to the efforts at IBM where a lab setup has the
most reliable reproducer I've seen on this issue.  This patch has been
tested there successfully.

v2: changed WARN_ONCE to pr_debug_once

Signed-off-by: Chris Leech 
Fixes: 659743b02c41 ("[SCSI] libiscsi: Reduce locking contention in fast path")
Reported-by: Prashantha Subbarao 
Reviewed-by: Guilherme G. Piccoli 
Cc:  # v3.15+
---
 drivers/scsi/libiscsi.c | 26 +-
 include/scsi/libiscsi.h |  1 +
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 834d1212b6d5..3fca34a675af 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, 
int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
 
-   if (!list_empty(&task->running))
+   spin_lock_bh(&conn->taskqueuelock);
+   if (!list_empty(&task->running)) {
+   pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+   }
+   spin_unlock_bh(&conn->taskqueuelock);
 
if (conn->task == task)
conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct 
iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+   spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+   spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
 
@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
 * this may be on the requeue list already if the xmit_task callout
 * is handling the r2ts while we are adding new ones
 */
+   spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+   spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 * only have one nop-out as a ping from us and targets should not
 * overflow us with nop-ins
 */
+   spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
 struct iscsi_task, running);
list_del_init(&conn->task->running);
+   spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+   spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+   spin_lock_bh(&conn->taskqueuelock);
}
 
/* process pending command queue */
@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+   spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+   spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+   spin_lock_bh(&conn->taskqueuelock);
+   list_add_tail(&conn->task->running,
  &conn->cmdqueue);

Re: [PATCH] target/user: Add daynmic growing data area feature support

2017-02-27 Thread Mike Christie
On 02/22/2017 02:32 PM, Andy Grover wrote:
> On 02/17/2017 01:24 AM, lixi...@cmss.chinamobile.com wrote:
>> > From: Xiubo Li 
>> > 
>> > Currently for the TCMU, the ring buffer size is fixed to 64K cmd
>> > area + 1M data area, and this will be bottlenecks for high iops.
> Hi Xiubo, thanks for your work.
> 
> daynmic -> dynamic
> 
> Have you benchmarked this patch and determined what kind of iops
> improvement it allows? Do you see the data area reaching its
> fully-allocated size?
> 

I tested this patch with Venky's tcmu-runner rbd aio patches, with one
10 gig iscsi session, and for pretty basic fio direct io (64-256K
read/writes with a queue depth of 64, numjobs between 1 and 4) tests read
throughput goes from about 80 to 500 MB/s. Write throughput is pretty
low at around 150 MB/s.

I did not hit the fully allocated size. I did not drive a lot of IO though.


Re: [LSF/MM TOPIC] do we really need PG_error at all?

2017-02-27 Thread NeilBrown
On Mon, Feb 27 2017, Andreas Dilger wrote:

>
> My thought is that PG_error is definitely useful for applications to get
> correct errors back when doing write()/sync_file_range() so that they know
> there is an error in the data that _they_ wrote, rather than receiving an
> error for data that may have been written by another thread, and in turn
> clearing the error from another thread so it *doesn't* know it had a write
> error.

It might be useful in that way, but it is not currently used that way.
Such usage would be a change in visible behaviour.

sync_file_range() calls filemap_fdatawait_range(), which calls
filemap_check_errors().
If there have been any errors in the file recently, inside or outside
the range, the latter will return an error which will propagate up.

>
> As for stray sync() clearing PG_error from underneath an application, that
> shouldn't happen since filemap_fdatawait_keep_errors() doesn't clear errors
> and is used by device flushing code (fdatawait_one_bdev(), wait_sb_inodes()).

filemap_fdatawait_keep_errors() calls __filemap_fdatawait_range() which
clears PG_error on every page.
What it doesn't do is call filemap_check_errors(), and so doesn't clear
AS_ENOSPC or AS_EIO.
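
For reference, that helper looks roughly like this in 4.10-era trees
(paraphrased from mm/filemap.c, so double-check against your tree):

static int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;

	/* the AS_* bits stay set until somebody tests and clears them */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}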

NeilBrown




[ANNOUNCE]: Broadcom (Emulex) FC Target driver - efct

2017-02-27 Thread James Smart
I'd like to announce the availability of the Broadcom (Emulex) FC Target
driver - efct.

This driver has been part of the Emulex OneCore Storage SDK tool kit for
Emulex SLI-4 adapters. The SLI-4 adapters support 16Gb/s and higher link
speeds. Although this kit has supported FCoE in the past, it is currently
limited to FC support.

This driver provides the following:
- Target mode operation:
  - Functional with LIO-based interfaces
  - Extensive use of hw offloads such as auto-xfer_rdy, auto-rsp, cmd cpu
    spreading
  - High login mode - thousands of logins
  - T-10 DIF/PI support (inline and separate)
  - NPIV support
- Concurrent Initiator support if needed
- Discovery engine has F_Port and fabric services emulation.
- Extended mgmt interfaces:
  - firmware dump api, including dump to host memory for faster dumps
  - Healthcheck operations and watchdogs
- Extended driver behaviors such as:
  - polled mode operation
  - multi-queue: cpu, roundrobin, or priority (but not tied to scsi-mq)
  - long chained sgl's
  - extensive internal logging and statistics
  - Tuning parameters on modes and resource allocation to different features


Broadcom is looking to upstream this driver and would like review and 
feedback.

The driver may be found at the following git repository:
g...@gitlab.com:jsmart/efct-Emulex_FC_Target.git


Some of the key questions we have are with lpfc:

1) Coexistence vs integration
Currently, the efct driver maps to a different set of PCI ids than lpfc.
It's very clear there's an overlap with lpfc, both on SLI-4 hw as well as
initiator support. Although target mode support can be simplistically
added to lpfc, what we've found is that doing so means a lot of tradeoffs.
Some of the target mode features, when enabled, impact the initiator
support and how it would operate.

2) SLI-3 support
lpfc provides SLI-3 support so that all FC adapters are supported,
including the older ones. The form of the driver, based on its history,
is SLI-3 with SLI-3 adapted to SLI-4 at the point it hits the hardware.
efct does not support SLI-3.

3) Complexity of configuration knobs caused by the kitchen-sink of
features in lpfc?
We are pushing the limit on needing per-instance attributes rather than
global module parameters.

-- james





[bug report] scsi: lpfc: NVME Target: Receive buffer updates

2017-02-27 Thread Dan Carpenter
Hello James Smart,

This is a semi-automatic email about new static checker warnings.

The patch 2d7dbc4c2775: "scsi: lpfc: NVME Target: Receive buffer 
updates" from Feb 12, 2017, leads to the following Smatch complaint:

drivers/scsi/lpfc/lpfc_sli.c:15194 lpfc_mrq_create()
 warn: variable dereferenced before check 'hrq' (see line 15188)

drivers/scsi/lpfc/lpfc_sli.c
 15181  cnt = 0;
 15182  
 15183  for (idx = 0; idx < numrq; idx++) {
 15184  hrq = hrqp[idx];
 15185  drq = drqp[idx];
 15186  cq  = cqp[idx];
 15187  
 15188  if (hrq->entry_count != drq->entry_count) {

Dereferences.

 15189  status = -EINVAL;
 15190  goto out;
 15191  }
 15192  
 15193  /* sanity check on queue memory */
 15194  if (!hrq || !drq || !cq) {
 ^^^
Too late.

 15195  status = -ENODEV;
 15196  goto out;
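
One possible fix (just a sketch, not a tested patch) would be to move the
sanity check ahead of the first dereference:

	/* sanity check on queue memory before touching hrq/drq */
	if (!hrq || !drq || !cq) {
		status = -ENODEV;
		goto out;
	}

	if (hrq->entry_count != drq->entry_count) {
		status = -EINVAL;
		goto out;
	}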

regards,
dan carpenter


Re: [LSF/MM TOPIC] do we really need PG_error at all?

2017-02-27 Thread Andreas Dilger
On Feb 27, 2017, at 8:07 AM, Jeff Layton  wrote:
> 
> On Mon, 2017-02-27 at 11:27 +1100, NeilBrown wrote:
>> On Sun, Feb 26 2017, James Bottomley wrote:
>> 
>>> On Mon, 2017-02-27 at 08:03 +1100, NeilBrown wrote:
 On Sun, Feb 26 2017, James Bottomley wrote:
 
> [added linux-scsi and linux-block because this is part of our error
> handling as well]
> On Sun, 2017-02-26 at 09:42 -0500, Jeff Layton wrote:
>> Proposing this as a LSF/MM TOPIC, but it may turn out to be me
>> just not understanding the semantics here.
>> 
>> As I was looking into -ENOSPC handling in cephfs, I noticed that
>> PG_error is only ever tested in one place [1]
>> __filemap_fdatawait_range, which does this:
>> 
>>  if (TestClearPageError(page))
>>  ret = -EIO;
>> 
>> This error code will override any AS_* error that was set in the
>> mapping. Which makes me wonder...why don't we just set this error
>> in the mapping and not bother with a per-page flag? Could we
>> potentially free up a page flag by eliminating this?
> 
> Note that currently the AS_* codes are only set for write errors
> not for reads and we have no mapping error handling at all for swap
> pages, but I'm sure this is fixable.
 
 How is a read error different from a failure to set PG_uptodate?
 Does PG_error suppress retries?
>>> 
>>> We don't do any retries in the code above the block layer (or at least
>>> we shouldn't).
>> 
>> I was wondering about what would/should happen if a read request was
>> re-issued for some reason.  Should the error flag on the page cause an
>> immediate failure, or should it try again.
>> If read-ahead sees a read-error on some future page, is it necessary to
>> record the error so subsequent read-aheads don't notice the page is
>> missing and repeatedly try to re-load it?
>> When the application eventually gets to the faulty page, should a read
>> be tried then, or is the read-ahead failure permanent?
>> 
>> 
>> 
>>> 
> 
> From the I/O layer point of view we take great pains to try to
> pinpoint the error exactly to the sector.  We reflect this up by
> setting the PG_error flag on the page where the error occurred.  If
> we only set the error on the mapping, we lose that granularity,
> because the mapping is mostly at the file level (or VMA level for
> anon pages).
 
 Are you saying that the IO layer finds the page in the bi_io_vec and
 explicitly sets PG_error,
>>> 
>>> I didn't say anything about the mechanism.  I think the function you're
>>> looking for is fs/mpage.c:mpage_end_io().  layers below block indicate
>>> the position in the request.  Block maps the position to bio and the
>>> bio completion maps to page.  So the actual granularity seen in the
>>> upper layer depends on how the page to bio mapping is done.
>> 
>> If the block layer is just returning the status at a per-bio level (which
>> makes perfect sense), then this has nothing directly to do with the
>> PG_error flag.
>> 
>> The page cache needs to do something with bi_error, but it isn't
>> immediately clear that it needs to set PG_error.
>> 
>>> :q
 rather than just passing an error indication to bi_end_io ??  That
 would seem to be wrong as the page may not be in the page cache.
>>> 
>>> Usually pages in the mpage_end_io path are pinned, I think.
>>> 
 So I guess I misunderstand you.
 
> 
> So I think the question for filesystem people from us would be do
> you care about this accuracy?  If it's OK just to know an error
> occurred somewhere in this file, then perhaps we don't need it.
 
 I had always assumed that a bio would either succeed or fail, and
 that no finer granularity could be available.
>>> 
>>> It does ... but a bio can be as small as a single page.
>>> 
 I think the question here is: Do filesystems need the pagecache to
 record which pages have seen an IO error?
>>> 
>>> It's not just filesystems.  The partition code uses PageError() ... the
>>> metadata code might as well (those are things with no mapping).  I'm
>>> not saying we can't remove PG_error; I am saying it's not going to be
>>> quite as simple as using the AS_ flags.
>> 
>> The partition code could use PageUptodate().
>> mpage_end_io() calls page_endio() on each page, and on read error that
>> calls:
>> 
>>  ClearPageUptodate(page);
>>  SetPageError(page);
>> 
>> are both of these necessary?
>> 
> 
>> fs/buffer.c can use several bios to read a single page.
>> If any one returns an error, PG_error is set.  When all of them have
>> completed, if PG_error is clear, PG_uptodate is then set.
>> This is an opportunistic use of PG_error, rather than an essential use.
>> It could be "fixed", and would need to be fixed if we were to deprecate
>> use of PG_error for read errors.
>> There are probably other usages like this.
>> 
> 
> Ok, I think 

Re: [PATCHv2 3/5] scsi_error: do not escalate failed EH command

2017-02-27 Thread Bart Van Assche
On Wed, 2017-02-22 at 17:07 +0100, Hannes Reinecke wrote:
> When a command is sent as part of the error handling there
> is not point whatsoever to start EH escalation when that
> command fails; we are _already_ in the error handler,
> and the escalation is about to commence anyway.
> So just call 'scsi_try_to_abort_cmd()' to abort outstanding
> commands and let the main EH routine handle the rest.

Reviewed-by: Bart Van Assche 

Re: [PATCHv2 4/5] scsi: make scsi_eh_scmd_add() always succeed

2017-02-27 Thread Bart Van Assche
On Wed, 2017-02-22 at 17:07 +0100, Hannes Reinecke wrote:
>   spin_lock_irqsave(shost->host_lock, flags);
> + WARN_ON(shost->shost_state != SHOST_RUNNING &&
> + shost->shost_state != SHOST_CANCEL &&
> + shost->shost_state != SHOST_RECOVERY &&
> + shost->shost_state != SHOST_CANCEL_RECOVERY);
>   if (scsi_host_set_state(shost, SHOST_RECOVERY))
> - if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
> - goto out_unlock;
> + scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY);

Please issue a warning if the second scsi_host_set_state() fails. And once
that failure triggers a warning, I don't think we need the newly added
WARN_ON() statement anymore. Something else that surprised me is that you
consistently use WARN_ON() instead of WARN_ON_ONCE() in this patch?
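
Something like this is what I had in mind (untested, just to illustrate
the first point):

	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		WARN_ON_ONCE(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));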
  
Otherwise this patch looks fine to me.

Bart.

Re: Kernel crash with 4.10 trying to remove scsi disks

2017-02-27 Thread Farhan Ali

Hi Johannes,

I don't think this crash was newly introduced with 4.10. I had another 
run with 4.10 and that one completed successfully. So not sure what is 
causing this kernel crash.


Thanks
Farhan

On 02/27/2017 05:35 AM, Johannes Thumshirn wrote:

On 02/24/2017 04:07 PM, Farhan Ali wrote:

Hello,

I have noticed a kernel crash with 4.10 kernel in our s390 environment,
running a test trying to remove scsi disks. Here is a snippet of the
kernel crash message:


Hi Farhan,

Is this crash newly introduced with 4.10? Do you maybe have a chance to
bisect it?

Thanks,
Johannes





Re: [PATCHv2 0/5] SCSI EH cleanup

2017-02-27 Thread Bart Van Assche
On Fri, 2017-02-24 at 08:15 +0100, Hannes Reinecke wrote:
> scsi_abort_command() (ie the call which triggers async aborts) is only
> called if the .eh_timed_out() callback returns BLK_EH_NOT_HANDLED.
> So during reconnects we will not schedule any calls to async aborts.
> We _might_ have scheduled an async abort prior to a call to
> srp_reconnect_rport(), but that would be equivalent with calling
> srp_reconnect_rport() with commands still in flight.
> Is that even possible?
> If so, how do you handle these commands after reconnect returns?
> Any I_T_L nexus will be gone from the target, right?

srp_reconnect_rport() can proceed while commands are in flight. What will
happen is that no response will be received for the commands that are in
flight and hence that these commands will time out at the initiator side.
However, any newly submitted commands will be processed normally.

Bart.

Re: [PATCHv2 2/5] scsi: make eh_eflags persistent

2017-02-27 Thread Bart Van Assche
On Wed, 2017-02-22 at 17:07 +0100, Hannes Reinecke wrote:
> To detect if a failed command has been retried we must not
> clear scmd->eh_eflags when EH finishes.
> The flag should be persistent throughout the lifetime
> of the command.

Hello Hannes,

Is it on purpose that this patch does not remove the following statement from
drivers/scsi/libsas/sas_scsi_host.c?

cmd->eh_eflags = 0;

Thanks,

Bart.

[PATCH] scsi: lpfc: use div_u64 for 64-bit division

2017-02-27 Thread Arnd Bergmann
The new debugfs output causes a link error on 32-bit architectures:

ERROR: "__aeabi_uldivmod" [drivers/scsi/lpfc/lpfc.ko] undefined!

This code is not performance critical, so we can simply use div_u64().
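
For readers not familiar with the helper, the change boils down to the
following pattern (a generic illustration rather than lpfc code; div_u64()
lives in <linux/math64.h> and takes a u64 dividend and a u32 divisor):

	static u64 avg_ns(u64 total_ns, u32 nsamples)
	{
		/* "total_ns / nsamples" would emit __aeabi_uldivmod here */
		return nsamples ? div_u64(total_ns, nsamples) : 0;
	}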

Fixes: bd2cdd5e400f ("scsi: lpfc: NVME Initiator: Add debugfs support")
Fixes: 2b65e18202fd ("scsi: lpfc: NVME Target: Add debugfs support")
Signed-off-by: Arnd Bergmann 
---
 drivers/scsi/lpfc/lpfc_debugfs.c | 64 
 1 file changed, 32 insertions(+), 32 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 599fde4ea8b1..47c67bf0514e 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -873,8 +873,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
len += snprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg1_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg1_total,
+   phba->ktime_data_samples),
phba->ktime_seg1_min,
phba->ktime_seg1_max);
len += snprintf(
@@ -884,8 +884,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
len += snprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg2_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg2_total,
+   phba->ktime_data_samples),
phba->ktime_seg2_min,
phba->ktime_seg2_max);
len += snprintf(
@@ -895,8 +895,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
len += snprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg3_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg3_total,
+   phba->ktime_data_samples),
phba->ktime_seg3_min,
phba->ktime_seg3_max);
len += snprintf(
@@ -906,17 +906,17 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, 
char *buf, int size)
len += snprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg4_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg4_total,
+   phba->ktime_data_samples),
phba->ktime_seg4_min,
phba->ktime_seg4_max);
len += snprintf(
buf + len, PAGE_SIZE - len,
"Total IO avg time: %08lld\n",
-   ((phba->ktime_seg1_total +
+   div_u64(phba->ktime_seg1_total +
phba->ktime_seg2_total  +
phba->ktime_seg3_total +
-   phba->ktime_seg4_total) /
+   phba->ktime_seg4_total,
phba->ktime_data_samples));
return len;
}
@@ -935,8 +935,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
"cmd pass to NVME Layer\n");
len += snprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg1_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg1_total,
+   phba->ktime_data_samples),
phba->ktime_seg1_min,
phba->ktime_seg1_max);
len += snprintf(buf + len, PAGE_SIZE-len,
@@ -944,8 +944,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
"-to- Driver rcv cmd OP (action)\n");
len += snprintf(buf + len, PAGE_SIZE-len,
"avg:%08lld min:%08lld max %08lld\n",
-   phba->ktime_seg2_total /
-   phba->ktime_data_samples,
+   div_u64(phba->ktime_seg2_total,
+   phba->ktime_data_samples),
phba->ktime_seg2_min,
phba->ktime_seg2_max);
len += snprintf(buf + len, PAGE_SIZE-len,
@@ -953,8 +953,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char 
*buf, int size)
"Firmware WQ doorbell: cmd\n");
len += snprintf(buf + len, 

[PATCH] scsi: lpfc: use proper format string for dma_addr_t

2017-02-27 Thread Arnd Bergmann
dma_addr_t may be either u32 or u64, depending on the kernel configuration,
and we get a warning for the 32-bit case:

drivers/scsi/lpfc/lpfc_nvme.c: In function 'lpfc_nvme_ls_req':
drivers/scsi/lpfc/lpfc_logmsg.h:52:52: error: format '%llu' expects argument of 
type 'long long unsigned int', but argument 11 has type 'dma_addr_t {aka 
unsigned int}' [-Werror=format=]
drivers/scsi/lpfc/lpfc_logmsg.h:52:52: error: format '%llu' expects argument of 
type 'long long unsigned int', but argument 12 has type 'dma_addr_t {aka 
unsigned int}' [-Werror=format=]
drivers/scsi/lpfc/lpfc_nvme.c: In function 'lpfc_nvme_ls_abort':
drivers/scsi/lpfc/lpfc_logmsg.h:52:52: error: format '%llu' expects argument of 
type 'long long unsigned int', but argument 11 has type 'dma_addr_t {aka 
unsigned int}' [-Werror=format=]
drivers/scsi/lpfc/lpfc_logmsg.h:52:52: error: format '%llu' expects argument of 
type 'long long unsigned int', but argument 12 has type 'dma_addr_t {aka 
unsigned int}' [-Werror=format=]

printk has a special "%pad" format string that passes the dma address by
reference to solve this problem.
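
A generic usage sketch (not lpfc code):

	dma_addr_t dma_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/*
	 * %pad takes a pointer to the dma_addr_t, so the same format string
	 * is correct whether dma_addr_t is 32 or 64 bits wide.
	 */
	dev_info(dev, "buffer mapped at %pad\n", &dma_handle);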

Fixes: 01649561a8b4 ("scsi: lpfc: NVME Initiator: bind to nvme_fc api")
Signed-off-by: Arnd Bergmann 
---
 drivers/scsi/lpfc/lpfc_nvme.c | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 625b6589a34d..609a908ea9db 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -457,11 +457,11 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 "6051 ENTER.  lport %p, rport %p lsreq%p rqstlen:%d "
-"rsplen:%d %llux %llux\n",
+"rsplen:%d %pad %pad\n",
 pnvme_lport, pnvme_rport,
 pnvme_lsreq, pnvme_lsreq->rqstlen,
-pnvme_lsreq->rsplen, pnvme_lsreq->rqstdma,
-pnvme_lsreq->rspdma);
+pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+&pnvme_lsreq->rspdma);
 
vport->phba->fc4NvmeLsRequests++;
 
@@ -527,11 +527,11 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
 "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
-"rsplen:%d %llux %llux\n",
+"rsplen:%d %pad %pad\n",
 pnvme_lport, pnvme_rport,
 pnvme_lsreq, pnvme_lsreq->rqstlen,
-pnvme_lsreq->rsplen, pnvme_lsreq->rqstdma,
-pnvme_lsreq->rspdma);
+pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+&pnvme_lsreq->rspdma);
 
/*
 * Lock the ELS ring txcmplq and build a local list of all ELS IOs
-- 
2.9.0



Re: [PATCH] scsi_error: count medium access timeout only once per EH run

2017-02-27 Thread Ewan D. Milne
On Thu, 2017-02-23 at 11:27 +0100, Hannes Reinecke wrote:
> The current medium access timeout counter will be increased for
> each command, so if there are enough failed commands we'll hit
> the medium access timeout for even a single failure.
> Fix this by making the timeout per EH run, ie the counter will
> only be increased once per device and EH run.

So, this is good, the current implementation has a flaw in that
under certain conditions, a device will get offlined immediately,
(i.e. if there are a few medium access commands pending, and
they all timeout), which isn't what was intended.

It means, of course, that we will no longer detect cases like:

, , SUCCESS, SUCCESS, SUCCESS, 

as separate medium access timeouts, but I think the original
intent of Martin's change wasn't to operate on such a short
time-scale, am I right, Martin?

I made a few notes on the coding/implementation (below), but that
doesn't affect the functional change.  We should definitely change
what we have now, it is causing people problems.

> 
> Cc: Ewan Milne 
> Cc: Lawrence Oberman 
> Cc: Benjamin Block 
> Cc: Steffen Maier 
> Signed-off-by: Hannes Reinecke 
> ---
>  drivers/scsi/scsi_error.c |  2 ++
>  drivers/scsi/sd.c | 16 ++--
>  drivers/scsi/sd.h |  1 +
>  include/scsi/scsi.h   |  1 +
>  4 files changed, 18 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
> index f2cafae..481ea1b 100644
> --- a/drivers/scsi/scsi_error.c
> +++ b/drivers/scsi/scsi_error.c
> @@ -58,6 +58,7 @@
>  static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
>  static int scsi_try_to_abort_cmd(struct scsi_host_template *,
>struct scsi_cmnd *);
> +static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn);
>  
>  /* called with shost->host_lock held */
>  void scsi_eh_wakeup(struct Scsi_Host *shost)
> @@ -249,6 +250,7 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
>   if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
>   eh_flag &= ~SCSI_EH_CANCEL_CMD;
>   scmd->eh_eflags |= eh_flag;
> + scsi_eh_action(scmd, NEEDS_RESET);

So here we are overloading the eh_disp argument with a flag to reset the
medium_access_reset variable.  James changed the calling sequence of
this function already to remove arguments, we could just add another
boolean parameter "reset".  scsi_driver.eh_action() would need it too.

>   list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
>   shost->host_failed++;
>   scsi_eh_wakeup(shost);
> diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
> index be535d4..cd9f290 100644
> --- a/drivers/scsi/sd.c
> +++ b/drivers/scsi/sd.c
> @@ -1696,12 +1696,21 @@ static int sd_pr_clear(struct block_device *bdev, u64 
> key)
>   *   the eh command is passed in eh_disp.  We're looking for devices that
>   *   fail medium access commands but are OK with non access commands like
>   *   test unit ready (so wrongly see the device as having a successful
> - *   recovery)
> + *   recovery).
> + *   We have to be careful to count a medium access failure only once
> + *   per SCSI EH run; there might be several timed out commands which
> + *   will cause the 'max_medium_access_timeouts' counter to trigger
> + *   after the first SCSI EH run already and set the device to offline.
>   **/
>  static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
>  {
>   struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
>  
> + if (eh_disp == NEEDS_RESET) {
> + /* New SCSI EH run, reset gate variable */
> + sdkp->medium_access_reset = 0;
> + return eh_disp;
> + }
>   if (!scsi_device_online(scmd->device) ||
>   !scsi_medium_access_command(scmd) ||
>   host_byte(scmd->result) != DID_TIME_OUT ||
> @@ -1715,7 +1724,10 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int 
> eh_disp)
>* process of recovering or has it suffered an internal failure
>* that prevents access to the storage medium.
>*/
> - sdkp->medium_access_timed_out++;
> + if (!sdkp->medium_access_reset) {
> + sdkp->medium_access_timed_out++;
> + sdkp->medium_access_reset++;
> + }

If we only increment sdkp->medium_access_reset when it was 0, then it
will only have the values 0 and 1, and does not need to have the full
unsigned int precision.  A single bit field is sufficient, in which
case the code would be:  sdkp->medium_access_reset = 1;
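
For illustration only (not part of Hannes' patch): a self-contained userspace
model of the per-EH-run gating being discussed, using the bit-field variant
suggested above.  The struct and helper names are invented for the example;
only the gating logic mirrors the patch.

    #include <stdio.h>

    struct scsi_disk_model {                    /* stand-in for struct scsi_disk */
            unsigned int medium_access_timed_out;
            unsigned int medium_access_reset:1; /* 1-bit gate, as suggested */
    };

    static void start_eh_run(struct scsi_disk_model *sdkp)
    {
            sdkp->medium_access_reset = 0;      /* new EH run, re-arm the gate */
    }

    static void medium_access_timeout(struct scsi_disk_model *sdkp)
    {
            if (!sdkp->medium_access_reset) {   /* count at most once per run */
                    sdkp->medium_access_timed_out++;
                    sdkp->medium_access_reset = 1;
            }
    }

    int main(void)
    {
            struct scsi_disk_model sdkp = { 0 };
            int i;

            start_eh_run(&sdkp);
            for (i = 0; i < 5; i++)             /* five timeouts in one EH run */
                    medium_access_timeout(&sdkp);
            printf("after run 1: %u\n", sdkp.medium_access_timed_out);  /* 1 */

            start_eh_run(&sdkp);                /* second EH run */
            medium_access_timeout(&sdkp);
            printf("after run 2: %u\n", sdkp.medium_access_timed_out);  /* 2 */
            return 0;
    }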

>  
>   /*
>* If the device keeps failing read/write commands but TEST UNIT
> diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
> index 4dac35e..19e0bab 100644
> --- a/drivers/scsi/sd.h
> +++ b/drivers/scsi/sd.h
> @@ -85,6 +85,7 @@ struct scsi_disk {
>   unsigned intphysical_block_size;
>   unsigned intmax_medium_access_timeouts;
>   unsigned int

how to unmap pages in an anonymous mmap?

2017-02-27 Thread Andy Grover
On 02/26/2017 09:59 PM, Xiubo Li wrote:
>> But we likely don't want to release memory from the data area anyway
>> while it is active. How about if we set a timer when active
>> commands go to zero, and then reduce the data area to some minimum if no new
>> cmds come in before the timer expires?
> 
> If I understand correctly: for example, we have 1G (as the minimum)
> data area and all blocks have been allocated and mapped into the runner's
> vma, then we extend it to 1G + 256M as needed. When there are no
> active cmds and the timer expires, will it shrink the data area
> back to 1G? And should it then release the memory backing the reclaimed
> 256M of data area?
> 
> If so, after kfree()ing the blocks' memory, it should also try to remove
> all the ptes which map those pages (something like try_to_unmap()),
> but helpers like try_to_unmap() are not exported to modules.
> 
> Without unmapping the ptes of the kfree()ed pages mentioned above,
> the reclaimed 256M of vma space couldn't be reused again by the runner
> process, because the runner still has that part of the vma mapped to the
> old physical pages (so no new page fault will be triggered). Then there
> will be a hole, and the hole will keep getting bigger.
> 
> Without unmapping the ptes of the kfree()ed pages mentioned above, the
> pages' reference counts (page_ref_dec(), matching the _inc() done in the
> page fault path) can't be dropped back either.

Let's ask people who will know...

Hi linux-mm,

TCM-User (drivers/target/target_core_user.c) currently uses vmalloc()ed
memory to back a ring buffer that is mmaped by userspace.

We want to move to dynamically mapping pages into this region, and also
we'd like to unmap/free pages when idle. What's the right way to unmap?
I see unmap_mapping_range() but that mentions an underlying file, which
TCMU doesn't have. Or maybe zap_page_range()? But it's not exported.

Any advice?

Thanks in advance -- Regards -- Andy
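
A rough sketch of one possible direction, offered with caveats: it assumes the
ring is mmap()ed through a file-backed mapping (for TCMU that would be the uio
device node) so that a struct address_space is reachable, and it assumes
unmap_mapping_range() is acceptable for this use; 'off' and 'len' are the byte
range of the data area being shrunk, relative to the start of the mapping.

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Drop the PTEs for the reclaimed part of the data area before freeing
     * the backing pages.  A later userspace access to that range faults
     * again and can be handed a fresh page. */
    static void tcmu_unmap_reclaimed_range(struct address_space *mapping,
                                           loff_t off, loff_t len)
    {
            /* even_cows == 1: also zap private COW copies of these pages */
            unmap_mapping_range(mapping, off, len, 1);
    }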


Re: [PATCH] sd: close hole in > 2T device rejection when !CONFIG_LBDAF

2017-02-27 Thread Bart Van Assche
On Mon, 2017-02-27 at 11:13 -0600, Steve Magnani wrote:
> On 02/27/2017 10:13 AM, Bart Van Assche wrote:
> > Why are the two checks slightly different? Could the same code be used for
> > both checks?
> 
> The checks are different because with READ CAPACITY(16) a _really_ huge 
> device could report a max LBA so large that left-shifting it causes bits 
> to drop off the end. That's not an issue with READ CAPACITY(10) because 
> at most the 32-bit LBA reported by the device will become a 35-bit value 
> (since the max supported block size is 4096 == (512 << 3)).

Sorry but I still don't understand why the two checks are different. How about
the (untested) patch below? The approach below avoids duplicating the check
and - at least in my opinion - results in code that is easier to read.

Thanks,

Bart.


diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cb6e68dd6df0..3533d1e46bde 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2082,6 +2082,16 @@ static void read_capacity_error(struct scsi_disk *sdkp, 
struct scsi_device *sdp,
sdkp->capacity = 0; /* unknown mapped to zero - as usual */
 }
 
+/*
+ * Check whether or not logical_to_sectors(sdp, lba) will overflow.
+ */
+static bool lba_too_large(u64 lba, u32 logical_block_size)
+{
+   int shift = sizeof(sector_t) * 8 + 9 - ilog2(logical_block_size);
+
+   return shift >= 0 && shift < 64 && lba >= (1ULL << shift);
+}
+
 #define RC16_LEN 32
 #if RC16_LEN > SD_BUF_SIZE
 #error RC16_LEN must not be more than SD_BUF_SIZE
@@ -2154,7 +2164,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, 
struct scsi_device *sdp,
return -ENODEV;
}
 
-   if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+   if (lba_too_large(lba + 1, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2243,7 +2253,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, 
struct scsi_device *sdp,
return sector_size;
}
 
-   if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+   if (lba_too_large(lba + 1ULL, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
-- 
2.11.0
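
Purely as an illustration (not part of either patch), the shift in
lba_too_large() can be exercised from userspace; sector_t is modelled as a
32-bit type to mimic !CONFIG_LBDAF:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t sector_t;          /* model of !CONFIG_LBDAF */

    static int ilog2_u32(uint32_t v)    /* userspace stand-in for ilog2() */
    {
            int r = -1;

            while (v) {
                    v >>= 1;
                    r++;
            }
            return r;
    }

    /* same logic as the lba_too_large() helper above */
    static bool lba_too_large(uint64_t lba, uint32_t logical_block_size)
    {
            int shift = sizeof(sector_t) * 8 + 9 - ilog2_u32(logical_block_size);

            return shift >= 0 && shift < 64 && lba >= (1ULL << shift);
    }

    int main(void)
    {
            /* 4294967296 x 512-byte blocks and 2147483648 x 1024-byte blocks
             * are both 2 TiB; both overflow a 32-bit sector_t. */
            printf("%d\n", lba_too_large(4294967296ULL, 512));   /* 1 */
            printf("%d\n", lba_too_large(2147483648ULL, 1024));  /* 1 */
            printf("%d\n", lba_too_large(4294967295ULL, 512));   /* 0 */
            return 0;
    }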


Re: [PATCH] sd: close hole in > 2T device rejection when !CONFIG_LBDAF

2017-02-27 Thread Steve Magnani

Hi Bart -

Thanks for taking the time to look this over.

On 02/27/2017 10:13 AM, Bart Van Assche wrote:

On Mon, 2017-02-27 at 09:22 -0600, Steven J. Magnani wrote:

@@ -2122,7 +2122,10 @@ static int read_capacity_16(struct scsi_
return -ENODEV;
}
  
-	if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {

+   /* Make sure logical_to_sectors() won't overflow */
+   lba_in_sectors = lba << (ilog2(sector_size) - 9);
+   if ((sizeof(sdkp->capacity) == 4) &&
+   ((lba >= 0xffffffffULL) || (lba_in_sectors >= 0xffffffffULL))) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2162,6 +2165,7 @@ static int read_capacity_10(struct scsi_
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
+   unsigned long long lba_in_sectors;
unsigned sector_size;
  
  	do {

@@ -2208,7 +2212,10 @@ static int read_capacity_10(struct scsi_
return sector_size;
}
  
-	if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {

+   /* Make sure logical_to_sectors() won't overflow */
+   lba_in_sectors = ((unsigned long long) lba) << (ilog2(sector_size) - 9);
+   if ((sizeof(sdkp->capacity) == 4) &&
+   (lba_in_sectors >= 0xffffffffULL)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");

Why are the two checks slightly different? Could the same code be used for
both checks?
The checks are different because with READ CAPACITY(16) a _really_ huge 
device could report a max LBA so large that left-shifting it causes bits 
to drop off the end. That's not an issue with READ CAPACITY(10) because 
at most the 32-bit LBA reported by the device will become a 35-bit value 
(since the max supported block size is 4096 == (512 << 3)).



BTW, using the macro below would make the above checks less
verbose and easier to read:

/*
  * Test whether the result of a shift-left operation would be larger than
  * what fits in a variable with the type of @a.
  */
#define shift_left_overflows(a, b)  \
({  \
typeof(a) _minus_one = -1LL;\
typeof(a) _plus_one = 1;\
bool _a_is_signed = _minus_one < 0;  \
int _shift = sizeof(a) * 8 - ((b) + _a_is_signed);  \
_shift < 0 || ((a) & ~((_plus_one << _shift) - 1)) != 0;\
})

Bart.
Perhaps but I am not a fan of putting braces in non-obvious places such 
as within array subscripts (which I've encountered recently) or 
conditional expressions, which is what this amounts to.


Regards,

 Steven J. Magnani   "I claim this network for MARS!
 www.digidescorp.com  Earthling, return my space modulator!"

 #include 



[bug report] scsi: lpfc: NVME Target: Base modifications

2017-02-27 Thread Dan Carpenter
Hello James Smart,

This is a semi-automatic email about new static checker warnings.

The patch f358dd0ca26c: "scsi: lpfc: NVME Target: Base modifications" 
from Feb 12, 2017, leads to the following Smatch complaint:

drivers/scsi/lpfc/lpfc_mem.c:650 lpfc_sli4_nvmet_alloc()
 warn: variable dereferenced before check 'dma_buf->iocbq' (see line 
649)

drivers/scsi/lpfc/lpfc_mem.c
   648  dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
   649  dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
^
Dereference.

   650  if (!dma_buf->iocbq) {
 ^^
Check.

   651  kfree(dma_buf->context);
   652  pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
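
A likely shape of the fix, sketched only from the lines quoted above (the
cleanup label is hypothetical, and the real error path is up to the driver
authors):

    dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
    if (!dma_buf->iocbq)               /* test before the first dereference */
            goto out_free_context;     /* hypothetical cleanup label */
    dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;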

regards,
dan carpenter


[bug report] scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.

2017-02-27 Thread Dan Carpenter
Hello Dupuis, Chad,

This is a semi-automatic email about new static checker warnings.

The patch 61d8658b4a43: "scsi: qedf: Add QLogic FastLinQ offload FCoE 
driver framework." from Feb 15, 2017, leads to the following Smatch 
complaint:

drivers/scsi/qedf/qedf_main.c:1565 qedf_vport_destroy()
 warn: variable dereferenced before check 'vn_port->host' (see line 
1551)

drivers/scsi/qedf/qedf_main.c
  1550  /* Detach from scsi-ml */
  1551  fc_remove_host(vn_port->host);
   ^
Dereferenced inside function.

  1552  scsi_remove_host(vn_port->host);
  1553  
  1554  /*
  1555   * Only try to release the exchange manager if the vn_port
  1556   * configuration is complete.
  1557   */
  1558  if (vn_port->state == LPORT_ST_READY)
  1559  fc_exch_mgr_free(vn_port);
  1560  
  1561  /* Free memory used by statistical counters */
  1562  fc_lport_free_stats(vn_port);
  1563  
  1564  /* Release Scsi_Host */
  1565  if (vn_port->host)
^
Too late.

  1566  scsi_host_put(vn_port->host);
  1567  
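
One possible shape of a fix, sketched only from the quoted lines (whether
vn_port->host can actually be NULL on this path is for the driver authors to
decide):

    /* do the NULL test before the first dereference, not after */
    if (vn_port->host) {
            fc_remove_host(vn_port->host);
            scsi_remove_host(vn_port->host);
    }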

regards,
dan carpenter


[bug report] scsi: lpfc: NVME Initiator: Merge into FC discovery

2017-02-27 Thread Dan Carpenter
Hello James Smart,

This is a semi-automatic email about new static checker warnings.

The patch a0f2d3ef374f: "scsi: lpfc: NVME Initiator: Merge into FC 
discovery" from Feb 12, 2017, leads to the following Smatch complaint:

drivers/scsi/lpfc/lpfc_ct.c:943 lpfc_cmpl_ct_cmd_gft_id()
 error: we previously assumed 'ndlp' could be null (see line 928)

drivers/scsi/lpfc/lpfc_ct.c
   927  ndlp = lpfc_findnode_did(vport, did);
   928  if (ndlp) {

Check.

   929  /* The bitmask value for FCP and NVME FCP types 
is
   930   * the same because they are 32 bits distant 
from
   931   * each other in word0 and word0.
   932   */
   933  if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
   934  ndlp->nlp_fc4_type |= NLP_FC4_FCP;
   935  if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
   936  ndlp->nlp_fc4_type |= NLP_FC4_NVME;
   937  lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
   938   "3064 Setting ndlp %p, DID 
x%06x with "
   939   "FC4 x%08x, Data: x%08x 
x%08x\n",
   940   ndlp, did, ndlp->nlp_fc4_type,
   941   FC_TYPE_FCP, FC_TYPE_NVME);
   942  }
   943  ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;

Not checked.

   944  lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
   945  lpfc_issue_els_prli(vport, ndlp, 0);
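
A hedged sketch of the direction such a fix could take for the hunk above,
built only from the quoted lines: keep the state transition inside the
successful lookup so a NULL ndlp is never dereferenced (the log message is
omitted here for brevity).

    ndlp = lpfc_findnode_did(vport, did);
    if (ndlp) {
            if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
                    ndlp->nlp_fc4_type |= NLP_FC4_FCP;
            if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
                    ndlp->nlp_fc4_type |= NLP_FC4_NVME;
            ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
            lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
            lpfc_issue_els_prli(vport, ndlp, 0);
    }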

drivers/scsi/lpfc/lpfc_nvmet.c:1694 lpfc_nvmet_unsol_issue_abort()
error: we previously assumed 'ndlp' could be null (see line 1690)

drivers/scsi/lpfc/lpfc_nvmet.c
  1688  
  1689  ndlp = lpfc_findnode_did(phba->pport, sid);
  1690  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^
Check.

  1691  ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  1692  (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  1693  atomic_inc(&tgtp->xmt_abort_rsp_error);
  1694  lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
  1695  "6134 Drop ABTS - wrong NDLP state 
x%x.\n",
  1696  ndlp->nlp_state);
^^^
Potential Oops.

  1697  
  1698  /* No failure to an ABTS request. */
  1699  return 0;
  1700  }

drivers/scsi/lpfc/lpfc_nvmet.c:1792 lpfc_nvmet_sol_fcp_issue_abort()
error: we previously assumed 'ndlp' could be null (see line 1788)

drivers/scsi/lpfc/lpfc_nvmet.c
  1786  
  1787  ndlp = lpfc_findnode_did(phba->pport, sid);
  1788  if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^
Check.

  1789  ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
  1790  (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
  1791  atomic_inc(&tgtp->xmt_abort_rsp_error);
  1792  lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
  1793  "6160 Drop ABTS - wrong NDLP state 
x%x.\n",
  1794  ndlp->nlp_state);
^^^
Potential Oops.

  1795  
  1796  /* No failure to an ABTS request. */
  1797  return 0;
  1798  }

regards,
dan carpenter


Re: [PATCH] sd: close hole in > 2T device rejection when !CONFIG_LBDAF

2017-02-27 Thread Bart Van Assche
On Mon, 2017-02-27 at 09:22 -0600, Steven J. Magnani wrote:
> @@ -2122,7 +2122,10 @@ static int read_capacity_16(struct scsi_
>   return -ENODEV;
>   }
>  
> - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
> + /* Make sure logical_to_sectors() won't overflow */
> + lba_in_sectors = lba << (ilog2(sector_size) - 9);
> + if ((sizeof(sdkp->capacity) == 4) &&
> + ((lba >= 0xffffffffULL) || (lba_in_sectors >= 0xffffffffULL))) {
>   sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
>   "kernel compiled with support for large block "
>   "devices.\n");
> @@ -2162,6 +2165,7 @@ static int read_capacity_10(struct scsi_
>   int the_result;
>   int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
>   sector_t lba;
> + unsigned long long lba_in_sectors;
>   unsigned sector_size;
>  
>   do {
> @@ -2208,7 +2212,10 @@ static int read_capacity_10(struct scsi_
>   return sector_size;
>   }
>  
> - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
> + /* Make sure logical_to_sectors() won't overflow */
> + lba_in_sectors = ((unsigned long long) lba) << (ilog2(sector_size) - 9);
> + if ((sizeof(sdkp->capacity) == 4) &&
> + (lba_in_sectors >= 0xffffffffULL)) {
>   sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
>   "kernel compiled with support for large block "
>   "devices.\n");

Why are the two checks slightly different? Could the same code be used for
both checks? BTW, using the macro below would make the above checks less
verbose and easier to read:

/*
 * Test whether the result of a shift-left operation would be larger than
 * what fits in a variable with the type of @a.
 */
#define shift_left_overflows(a, b)  \
({  \
typeof(a) _minus_one = -1LL;\
typeof(a) _plus_one = 1;\
bool _a_is_signed = _minus_one < 0; \
int _shift = sizeof(a) * 8 - ((b) + _a_is_signed);  \
_shift < 0 || ((a) & ~((_plus_one << _shift) - 1)) != 0;\
})

Bart.
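
As a usage illustration of the macro above (nothing more than a sketch; the
macro relies on the GCC/Clang typeof and statement-expression extensions):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define shift_left_overflows(a, b)                                  \
    ({                                                                  \
            typeof(a) _minus_one = -1LL;                                \
            typeof(a) _plus_one = 1;                                    \
            bool _a_is_signed = _minus_one < 0;                         \
            int _shift = sizeof(a) * 8 - ((b) + _a_is_signed);          \
            _shift < 0 || ((a) & ~((_plus_one << _shift) - 1)) != 0;    \
    })

    int main(void)
    {
            uint64_t lba = 1ULL << 61;

            /* shifting by 3 (4096-byte blocks -> 512-byte sectors) overflows
             * a 64-bit unsigned LBA exactly when bit 61 or above is set */
            printf("%d\n", (int)shift_left_overflows(lba, 3));      /* 1 */
            printf("%d\n", (int)shift_left_overflows(lba - 1, 3));  /* 0 */
            return 0;
    }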

[bug report] scsi: lpfc: NVME Initiator: Base modifications

2017-02-27 Thread Dan Carpenter
Hello James Smart,

The patch 895427bd012c: "scsi: lpfc: NVME Initiator: Base
modifications" from Feb 12, 2017, leads to the following static
checker warning:

[  Heck...  I just decided to report all the static checker warnings for
   this file.  - dan ]

drivers/scsi/lpfc/lpfc_hbadisc.c:316 lpfc_dev_loss_tmo_handler()
warn: we tested 'vport->load_flag & 2' before and it was 'false'

   248  /* Don't defer this if we are in the process of deleting the 
vport
   249   * or unloading the driver. The unload will cleanup the node
   250   * appropriately we just need to cleanup the ndlp rport info 
here.
   251   */
   252  if (vport->load_flag & FC_UNLOADING) {
   253  if (ndlp->nlp_sid != NLP_NO_SID) {
   254  /* flush the target */
   255  lpfc_sli_abort_iocb(vport,
   256  &phba->sli.sli3_ring[LPFC_FCP_RING],
   257  ndlp->nlp_sid, 0, 
LPFC_CTX_TGT);
   258  }
   259  put_node = rdata->pnode != NULL;
   260  rdata->pnode = NULL;
   261  ndlp->rport = NULL;
   262  if (put_node)
   263  lpfc_nlp_put(ndlp);
   264  put_device(&rport->dev);
   265  
   266  return fcf_inuse;
   267  }

[ snip ]


   315  
   316  if (!(vport->load_flag & FC_UNLOADING) &&
^^
Delete this dead code.

   317  !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
   318  !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
   319  (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
   320  (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
   321  (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
   322  lpfc_disc_state_machine(vport, ndlp, NULL, 
NLP_EVT_DEVICE_RM);
   323  
   324  return fcf_inuse;
   325  }

drivers/scsi/lpfc/lpfc_hbadisc.c:701 lpfc_work_done()
warn: test_bit() takes a bit number

   698  if (pring->flag & LPFC_STOP_IOCB_EVENT) {
   699  pring->flag |= LPFC_DEFERRED_RING_EVENT;
   700  /* Set the lpfc data pending flag */
   701  set_bit(LPFC_DATA_READY, &phba->data_flags);
^^^
Not harmful because 1 << 0 is still 1.  But still wrong.

   702  } else {
   703  if (phba->link_state >= LPFC_LINK_UP) {
   704  pring->flag &= 
~LPFC_DEFERRED_RING_EVENT;
   705  lpfc_sli_handle_slow_ring_event(phba, 
pring,
   706  (status 
&
   707  
HA_RXMASK));
   708  }
   709  }

drivers/scsi/lpfc/lpfc_hbadisc.c:2206 
lpfc_mbx_cmpl_fcf_scan_read_fcf_rec()
error: uninitialized symbol 'vlan_id'.
drivers/scsi/lpfc/lpfc_hbadisc.c:2582 
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec()
error: uninitialized symbol 'vlan_id'.
drivers/scsi/lpfc/lpfc_hbadisc.c:2683 lpfc_mbx_cmpl_read_fcf_rec() 
error:
uninitialized symbol 'vlan_id'.

  2201  /* Check the FCF record against the connection list */
  2202  rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
  2203        &addr_mode, &vlan_id);
  2204  
  2205  /* Log the FCF record information if turned on */
  2206  lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
^^^
Perhaps move this under the check for if (!rc) {?

  2207next_fcf_index);
  2208  
  2209  /*
  2210   * If the fcf record does not match with connect list entries
  2211   * read the next entry; otherwise, this is an eligible FCF
  2212   * record for roundrobin FCF failover.
  2213   */
  2214  if (!rc) {
^^^

drivers/scsi/lpfc/lpfc_hbadisc.c:4025 lpfc_register_remote_port()
error: we previously assumed 'rdata' could be null (see line 4023)

  4018  rport = ndlp->rport;
  4019  if (rport) {
  4020  rdata = rport->dd_data;
  4021  /* break the link before dropping the ref */
  4022  ndlp->rport = NULL;
  4023  if (rdata && rdata->pnode == ndlp)
^
Check.

  4024  lpfc_nlp_put(ndlp);
  4025  rdata->pnode = NULL;

Unchecked.

  4026  

Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Joe Perches
On Mon, 2017-02-27 at 13:52 +0100, Romain Perier wrote:

> > I also wonder if you've in fact converted all of the
> > pci_pool struct and function uses why a new checkpatch
> > test is needed at all.
> 
> That's just to avoid future mistakes/uses.

When all instances and macro definitions are removed
the check is pointless as any newly submitted patch
will not compile.
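
For reference, an illustrative before/after of the conversion the new rule
targets (identifiers here are made up; note that the checkpatch --fix
substitution only renames the calls, the pdev -> &pdev->dev argument change
still has to be made by hand):

    /* before: old PCI pool API */
    pool = pci_pool_create("example", pdev, size, align, 0);
    buf  = pci_pool_zalloc(pool, GFP_KERNEL, &dma);
    pci_pool_free(pool, buf, dma);
    pci_pool_destroy(pool);

    /* after: DMA pool API */
    pool = dma_pool_create("example", &pdev->dev, size, align, 0);
    buf  = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
    dma_pool_free(pool, buf, dma);
    dma_pool_destroy(pool);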



[bug report] scsi: aacraid: Reorder Adapter status check

2017-02-27 Thread Dan Carpenter
Hello Raghava Aditya Renukunta,

The patch c421530bf848: "scsi: aacraid: Reorder Adapter status check"
from Feb 16, 2017, leads to the following static checker warning:

drivers/scsi/aacraid/src.c:471 aac_src_check_health()
warn: was shift intended here '(status > 16)'

drivers/scsi/aacraid/src.c
   464   */
   465  return 0;
   466  
   467  err_out:
   468  return -1;
   469  
   470  err_blink:
   471  return (status > 16) & 0xFF;
^^^
Issue #1:  This is obviously a typo.

   472  }

Issue #2:  The caller checks for if the return is greater than 2.  It
   never is.  We can remove this dead code.

Issue #3:  The caller passes "bled" to aac_send_iop_reset() which
   ignores it.  What's up with that?  Either it's a bug or we
   should delete that dead code.
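
For issue #1, the intended expression is presumably a right shift rather than
a comparison; illustrative only, not a tested fix:

    err_blink:
            return (status >> 16) & 0xFF;   /* extract the blink code */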

regards,
dan carpenter


[PATCH] sd: close hole in > 2T device rejection when !CONFIG_LBDAF

2017-02-27 Thread Steven J. Magnani
When the kernel is compiled _without_ support for large (>= 2TiB) block
devices, code in the sd driver's read_capacity() routines rejects devices
whose count of native-sized blocks does not fit in the 32 bit sector_t
type. A device reporting 4294967296 512-byte blocks will be rejected, but
a device of equal capacity reporting 2147483648 1024-byte blocks will not.

The latter case causes problems, for example misreporting of device
capacity by BLKGETSIZE64. This is because the kernel converts a device's
capacity in native-sized blocks to an equivalent number of 512-byte units
and stores the result in a sector_t - which is too small for devices such
as those noted above.

To prevent this, when the kernel is compiled without support for large
block devices, the read_capacity() routines must reject any device whose
capacity is 2 TiB or greater regardless of its count of native-sized
blocks.
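
To make the problematic case concrete (illustration only):

    2147483648 blocks * 1024 bytes     = 2 TiB
    largest reported LBA = 2147483647  -> passes the old 32-bit LBA test
    capacity in 512-byte units         = 2147483648 * 2 = 4294967296,
                                         which does not fit a 32-bit sector_t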

Signed-off-by: Steven J. Magnani 
---
--- a/drivers/scsi/sd.c 2017-02-24 20:29:44.510036363 -0600
+++ b/drivers/scsi/sd.c 2017-02-27 08:19:37.864786958 -0600
@@ -2066,7 +2066,7 @@ static int read_capacity_16(struct scsi_
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
unsigned int alignment;
-   unsigned long long lba;
+   unsigned long long lba, lba_in_sectors;
unsigned sector_size;
 
if (sdp->no_read_capacity_16)
@@ -2122,7 +2122,10 @@ static int read_capacity_16(struct scsi_
return -ENODEV;
}
 
-   if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+   /* Make sure logical_to_sectors() won't overflow */
+   lba_in_sectors = lba << (ilog2(sector_size) - 9);
+   if ((sizeof(sdkp->capacity) == 4) &&
+   ((lba >= 0xffffffffULL) || (lba_in_sectors >= 0xffffffffULL))) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2162,6 +2165,7 @@ static int read_capacity_10(struct scsi_
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
sector_t lba;
+   unsigned long long lba_in_sectors;
unsigned sector_size;
 
do {
@@ -2208,7 +2212,10 @@ static int read_capacity_10(struct scsi_
return sector_size;
}
 
-   if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+   /* Make sure logical_to_sectors() won't overflow */
+   lba_in_sectors = ((unsigned long long) lba) << (ilog2(sector_size) - 9);
+   if ((sizeof(sdkp->capacity) == 4) &&
+   (lba_in_sectors >= 0xffffffffULL)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");


Re: [LSF/MM TOPIC] do we really need PG_error at all?

2017-02-27 Thread Jeff Layton
On Mon, 2017-02-27 at 11:27 +1100, NeilBrown wrote:
> On Sun, Feb 26 2017, James Bottomley wrote:
> 
> > On Mon, 2017-02-27 at 08:03 +1100, NeilBrown wrote:
> > > On Sun, Feb 26 2017, James Bottomley wrote:
> > > 
> > > > [added linux-scsi and linux-block because this is part of our error
> > > > handling as well]
> > > > On Sun, 2017-02-26 at 09:42 -0500, Jeff Layton wrote:
> > > > > Proposing this as a LSF/MM TOPIC, but it may turn out to be me 
> > > > > just not understanding the semantics here.
> > > > > 
> > > > > As I was looking into -ENOSPC handling in cephfs, I noticed that
> > > > > PG_error is only ever tested in one place [1] 
> > > > > __filemap_fdatawait_range, which does this:
> > > > > 
> > > > >   if (TestClearPageError(page))
> > > > >   ret = -EIO;
> > > > > 
> > > > > This error code will override any AS_* error that was set in the
> > > > > mapping. Which makes me wonder...why don't we just set this error 
> > > > > in the mapping and not bother with a per-page flag? Could we
> > > > > potentially free up a page flag by eliminating this?
> > > > 
> > > > Note that currently the AS_* codes are only set for write errors 
> > > > not for reads and we have no mapping error handling at all for swap
> > > > pages, but I'm sure this is fixable.
> > > 
> > > How is a read error different from a failure to set PG_uptodate?
> > > Does PG_error suppress retries?
> > 
> > We don't do any retries in the code above the block layer (or at least
> > we shouldn't).
> 
> I was wondering about what would/should happen if a read request was
> re-issued for some reason.  Should the error flag on the page cause an
> immediate failure, or should it try again.
> If read-ahead sees a read-error on some future page, is it necessary to
> record the error so subsequent read-aheads don't notice the page is
> missing and repeatedly try to re-load it?
> When the application eventually gets to the faulty page, should a read
> be tried then, or is the read-ahead failure permanent?
> 
> 
> 
> > 
> > > > 
> > > > From the I/O layer point of view we take great pains to try to 
> > > > pinpoint the error exactly to the sector.  We reflect this up by 
> > > > setting the PG_error flag on the page where the error occurred.  If 
> > > > we only set the error on the mapping, we lose that granularity, 
> > > > because the mapping is mostly at the file level (or VMA level for
> > > > anon pages).
> > > 
> > > Are you saying that the IO layer finds the page in the bi_io_vec and
> > > explicitly sets PG_error,
> > 
> > I didn't say anything about the mechanism.  I think the function you're
> > looking for is fs/mpage.c:mpage_end_io().  layers below block indicate
> > the position in the request.  Block maps the position to bio and the
> > bio completion maps to page.  So the actual granularity seen in the
> > upper layer depends on how the page to bio mapping is done.
> 
> If the block layer is just returning the status at a per-bio level (which
> makes perfect sense), then this has nothing directly to do with the
> PG_error flag.
> 
> The page cache needs to do something with bi_error, but it isn't
> immediately clear that it needs to set PG_error.
> 
> > :q
> > >  rather than just passing an error indication to bi_end_io ??  That
> > > would seem to be wrong as the page may not be in the page cache.
> > 
> > Usually pages in the mpage_end_io path are pinned, I think.
> > 
> > >  So I guess I misunderstand you.
> > > 
> > > > 
> > > > So I think the question for filesystem people from us would be do 
> > > > you care about this accuracy?  If it's OK just to know an error
> > > > occurred somewhere in this file, then perhaps we don't need it.
> > > 
> > > I had always assumed that a bio would either succeed or fail, and 
> > > that no finer granularity could be available.
> > 
> > It does ... but a bio can be as small as a single page.
> > 
> > > I think the question here is: Do filesystems need the pagecache to
> > > record which pages have seen an IO error?
> > 
> > It's not just filesystems.  The partition code uses PageError() ... the
> > metadata code might as well (those are things with no mapping).  I'm
> > not saying we can't remove PG_error; I am saying it's not going to be
> > quite as simple as using the AS_ flags.
> 
> The partition code could use PageUptodate().
> mpage_end_io() calls page_endio() on each page, and on read error that
> calls:
> 
>   ClearPageUptodate(page);
>   SetPageError(page);
> 
> are both of these necessary?
> 

> fs/buffer.c can use several bios to read a single page.
> If any one returns an error, PG_error is set.  When all of them have
> completed, if PG_error is clear, PG_uptodate is then set.
> This is an opportunistic use of PG_error, rather than an essential use.
> It could be "fixed", and would need to be fixed if we were to deprecate
> use of PG_error for read errors.
> There are probably other usages like this.
> 
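
As an aside, a rough sketch of the "record it in the mapping instead" idea
being discussed; this is assumption-laden (it reuses mapping_set_error(), the
helper currently used for write errors) and is not taken from any patch in
this thread:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* hypothetical read-completion helper: report the failure through the
     * owning mapping so waiters see it, instead of relying on PG_error */
    static void readpage_end_sketch(struct page *page, int err)
    {
            if (err) {
                    ClearPageUptodate(page);
                    if (page->mapping)
                            mapping_set_error(page->mapping, err);
            } else {
                    SetPageUptodate(page);
            }
            unlock_page(page);
    }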

Ok, I think 

Re: [PATCH v5 02/11] phy: exynos-ufs: add UFS PHY driver for EXYNOS SoC

2017-02-27 Thread Alim Akhtar
Hi Kishon,

On 02/27/2017 10:56 AM, Kishon Vijay Abraham I wrote:
> Hi,
>
> On Thursday 23 February 2017 12:20 AM, Alim Akhtar wrote:
>> On Fri, Feb 3, 2017 at 2:49 PM, Alim Akhtar  wrote:
>>> Hi Kishon,
>>>
>>>
>>> On 11/19/2015 07:09 PM, Kishon Vijay Abraham I wrote:

 Hi,

 On Tuesday 17 November 2015 01:41 PM, Alim Akhtar wrote:
>
> Hi
> Thanks again for looking into this.
>
> On 11/17/2015 11:46 AM, Kishon Vijay Abraham I wrote:
>>
>> Hi,
>>
>> On Monday 09 November 2015 10:56 AM, Alim Akhtar wrote:
>>>
>>> From: Seungwon Jeon 
>>>
>>> This patch introduces Exynos UFS PHY driver. This driver
>>> supports to deal with phy calibration and power control
>>> according to UFS host driver's behavior.
>>>
>>> Signed-off-by: Seungwon Jeon 
>>> Signed-off-by: Alim Akhtar 
>>> Cc: Kishon Vijay Abraham I 
>>> ---
>>>   drivers/phy/Kconfig|7 ++
>>>   drivers/phy/Makefile   |1 +
>>>   drivers/phy/phy-exynos-ufs.c   |  241
>>> 
>>>   drivers/phy/phy-exynos-ufs.h   |   85 +
>>>   drivers/phy/phy-exynos7-ufs.h  |   89 +
>>>   include/linux/phy/phy-exynos-ufs.h |   85 +
>>>   6 files changed, 508 insertions(+)
>>>   create mode 100644 drivers/phy/phy-exynos-ufs.c
>>>   create mode 100644 drivers/phy/phy-exynos-ufs.h
>>>   create mode 100644 drivers/phy/phy-exynos7-ufs.h
>>>   create mode 100644 include/linux/phy/phy-exynos-ufs.h
>>>
>>> diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
>>> index 7eb5859dd035..7d38a92e0297 100644
>>> --- a/drivers/phy/Kconfig
>>> +++ b/drivers/phy/Kconfig
>>> @@ -389,4 +389,11 @@ config PHY_CYGNUS_PCIE
>>> Enable this to support the Broadcom Cygnus PCIe PHY.
>>> If unsure, say N.
>>>
>>> +config PHY_EXYNOS_UFS
>>> +tristate "EXYNOS SoC series UFS PHY driver"
>>> +depends on OF && ARCH_EXYNOS || COMPILE_TEST
>>> +select GENERIC_PHY
>>> +help
>>> +  Support for UFS PHY on Samsung EXYNOS chipsets.
>>> +
>>>   endmenu
>>> diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
>>> index 075db1a81aa5..9bec4d1a89e1 100644
>>> --- a/drivers/phy/Makefile
>>> +++ b/drivers/phy/Makefile
>>> @@ -10,6 +10,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY)+=
>>> phy-armada375-usb2.o
>>>   obj-$(CONFIG_BCM_KONA_USB2_PHY)+= phy-bcm-kona-usb2.o
>>>   obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO)+= phy-exynos-dp-video.o
>>>   obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO)+= phy-exynos-mipi-video.o
>>> +obj-$(CONFIG_PHY_EXYNOS_UFS)+= phy-exynos-ufs.o
>>>   obj-$(CONFIG_PHY_LPC18XX_USB_OTG)+= phy-lpc18xx-usb-otg.o
>>>   obj-$(CONFIG_PHY_PXA_28NM_USB2)+= phy-pxa-28nm-usb2.o
>>>   obj-$(CONFIG_PHY_PXA_28NM_HSIC)+= phy-pxa-28nm-hsic.o
>>> diff --git a/drivers/phy/phy-exynos-ufs.c
>>> b/drivers/phy/phy-exynos-ufs.c
>>> new file mode 100644
>>> index ..cb1aeaa3d4eb
>>> --- /dev/null
>>> +++ b/drivers/phy/phy-exynos-ufs.c
>>> @@ -0,0 +1,241 @@
>>> +/*
>>> + * UFS PHY driver for Samsung EXYNOS SoC
>>> + *
>>> + * Copyright (C) 2015 Samsung Electronics Co., Ltd.
>>> + * Author: Seungwon Jeon 
>>> + *
>>> + * This program is free software; you can redistribute it and/or
>>> modify
>>> + * it under the terms of the GNU General Public License as published
>>> by
>>> + * the Free Software Foundation; either version 2 of the License, or
>>> + * (at your option) any later version.
>>> + */
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +#include 
>>> +
>>> +#include "phy-exynos-ufs.h"
>>> +
>>> +#define for_each_phy_lane(phy, i) \
>>> +for (i = 0; i < (phy)->lane_cnt; i++)
>>> +#define for_each_phy_cfg(cfg) \
>>> +for (; (cfg)->id; (cfg)++)
>>> +
>>> +#define PHY_DEF_LANE_CNT1
>>> +
>>> +static void exynos_ufs_phy_config(struct exynos_ufs_phy *phy,
>>> +const struct exynos_ufs_phy_cfg *cfg, u8 lane)
>>> +{
>>> +enum {LANE_0, LANE_1}; /* lane index */
>>> +
>>> +switch (lane) {
>>> +case LANE_0:
>>> +writel(cfg->val, (phy)->reg_pma + cfg->off_0);
>>> +break;
>>> +case LANE_1:
>>> +if (cfg->id == PHY_TRSV_BLK)
>>> +writel(cfg->val, (phy)->reg_pma + cfg->off_1);
>>> +break;
>>> +}
>>> +}
>>> +
>>> +static bool 

Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Romain Perier
Hello,


Le 27/02/2017 à 13:38, Joe Perches a écrit :
> On Mon, 2017-02-27 at 13:26 +0100, Romain Perier wrote:
>> Hello,
>>
>>
>> Le 27/02/2017 à 12:22, Peter Senna Tschudin a écrit :
>>> On Sun, Feb 26, 2017 at 08:24:25PM +0100, Romain Perier wrote:
 pci_pool_*() functions should be replaced by the corresponding functions
 in the DMA pool API. This adds support to check for use of these pci
 functions and display a warning when it is the case.

>>> I guess Joe Perches did sent some comments for this one, did you address
>>> them?
>> See the changelog of 00/20 (for v2). I have already integrated his
>> comments :)
> Not quite.  You need to add blank lines before and after
> the new test you added.

Ok

>
> I also wonder if you've in fact converted all of the
> pci_pool struct and function uses why a new checkpatch
> test is needed at all.

That's just to avoid future mistakes/uses.

>
> Also, it seems none of these patches have reached lkml.
> Are you sending the patch series with MIME/html parts?

Normally no. I use git send-email for all my patches.

Regards,
Romain

>
>>> Reviewed-by: Peter Senna Tschudin 
 Signed-off-by: Romain Perier 
 ---
  scripts/checkpatch.pl | 9 -
  1 file changed, 8 insertions(+), 1 deletion(-)

 diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
 index baa3c7b..f2c775c 100755
 --- a/scripts/checkpatch.pl
 +++ b/scripts/checkpatch.pl
 @@ -6064,7 +6064,14 @@ sub process {
WARN("USE_DEVICE_INITCALL",
 "please use device_initcall() or more appropriate 
 function instead of __initcall() (see include/linux/init.h)\n" . 
 $herecurr);
}
 -
 +# check for old PCI api pci_pool_*(), use dma_pool_*() instead
 +  if ($line =~ 
 /\bpci_pool(?:_(?:create|destroy|alloc|zalloc|free)|)\b/) {
 +  if (WARN("USE_DMA_POOL",
 +   "please use the dma pool api or more 
 appropriate function instead of the old pci pool api\n" . $herecurr) &&
 +  $fix) {
 +  while ($fixed[$fixlinenr] =~ 
 s/\bpci_pool(_(?:create|destroy|alloc|zalloc|free)|)\b/dma_pool$1/) {}
 +  }
 +  }
  # check for various structs that are normally const (ops, kgdb, 
 device_tree)
if ($line !~ /\bconst\b/ &&
$line =~ /\bstruct\s+($const_structs)\b/) {
 -- 
 2.9.3



Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Joe Perches
On Mon, 2017-02-27 at 13:26 +0100, Romain Perier wrote:
> Hello,
> 
> 
> Le 27/02/2017 à 12:22, Peter Senna Tschudin a écrit :
> > On Sun, Feb 26, 2017 at 08:24:25PM +0100, Romain Perier wrote:
> > > pci_pool_*() functions should be replaced by the corresponding functions
> > > in the DMA pool API. This adds support to check for use of these pci
> > > functions and display a warning when it is the case.
> > > 
> > 
> > I guess Joe Perches did sent some comments for this one, did you address
> > them?
> 
> See the changelog of 00/20 (for v2). I have already integrated his
> comments :)

Not quite.  You need to add blank lines before and after
the new test you added.

I also wonder if you've in fact converted all of the
pci_pool struct and function uses why a new checkpatch
test is needed at all.

Also, it seems none of these patches have reached lkml.
Are you sending the patch series with MIME/html parts?  

> > Reviewed-by: Peter Senna Tschudin 
> > > Signed-off-by: Romain Perier 
> > > ---
> > >  scripts/checkpatch.pl | 9 -
> > >  1 file changed, 8 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
> > > index baa3c7b..f2c775c 100755
> > > --- a/scripts/checkpatch.pl
> > > +++ b/scripts/checkpatch.pl
> > > @@ -6064,7 +6064,14 @@ sub process {
> > >   WARN("USE_DEVICE_INITCALL",
> > >"please use device_initcall() or more appropriate 
> > > function instead of __initcall() (see include/linux/init.h)\n" . 
> > > $herecurr);
> > >   }
> > > -
> > > +# check for old PCI api pci_pool_*(), use dma_pool_*() instead
> > > + if ($line =~ 
> > > /\bpci_pool(?:_(?:create|destroy|alloc|zalloc|free)|)\b/) {
> > > + if (WARN("USE_DMA_POOL",
> > > +  "please use the dma pool api or more 
> > > appropriate function instead of the old pci pool api\n" . $herecurr) &&
> > > + $fix) {
> > > + while ($fixed[$fixlinenr] =~ 
> > > s/\bpci_pool(_(?:create|destroy|alloc|zalloc|free)|)\b/dma_pool$1/) {}
> > > + }
> > > + }
> > >  # check for various structs that are normally const (ops, kgdb, 
> > > device_tree)
> > >   if ($line !~ /\bconst\b/ &&
> > >   $line =~ /\bstruct\s+($const_structs)\b/) {
> > > -- 
> > > 2.9.3


Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Romain Perier
Hello,


Le 27/02/2017 à 12:22, Peter Senna Tschudin a écrit :
> On Sun, Feb 26, 2017 at 08:24:25PM +0100, Romain Perier wrote:
>> pci_pool_*() functions should be replaced by the corresponding functions
>> in the DMA pool API. This adds support to check for use of these pci
>> functions and display a warning when it is the case.
>>
> I guess Joe Perches did sent some comments for this one, did you address
> them?

See the changelog of 00/20 (for v2). I have already integrated his
comments :)

>
> Reviewed-by: Peter Senna Tschudin 
>> Signed-off-by: Romain Perier 
>> ---
>>  scripts/checkpatch.pl | 9 -
>>  1 file changed, 8 insertions(+), 1 deletion(-)
>>
>> diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
>> index baa3c7b..f2c775c 100755
>> --- a/scripts/checkpatch.pl
>> +++ b/scripts/checkpatch.pl
>> @@ -6064,7 +6064,14 @@ sub process {
>>  WARN("USE_DEVICE_INITCALL",
>>   "please use device_initcall() or more appropriate 
>> function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
>>  }
>> -
>> +# check for old PCI api pci_pool_*(), use dma_pool_*() instead
>> +if ($line =~ 
>> /\bpci_pool(?:_(?:create|destroy|alloc|zalloc|free)|)\b/) {
>> +if (WARN("USE_DMA_POOL",
>> + "please use the dma pool api or more 
>> appropriate function instead of the old pci pool api\n" . $herecurr) &&
>> +$fix) {
>> +while ($fixed[$fixlinenr] =~ 
>> s/\bpci_pool(_(?:create|destroy|alloc|zalloc|free)|)\b/dma_pool$1/) {}
>> +}
>> +}
>>  # check for various structs that are normally const (ops, kgdb, device_tree)
>>  if ($line !~ /\bconst\b/ &&
>>  $line =~ /\bstruct\s+($const_structs)\b/) {
>> -- 
>> 2.9.3
>>



Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Joe Perches
On Mon, 2017-02-27 at 12:22 +0100, Peter Senna Tschudin wrote:
> On Sun, Feb 26, 2017 at 08:24:25PM +0100, Romain Perier wrote:
> > pci_pool_*() functions should be replaced by the corresponding functions
> > in the DMA pool API. This adds support to check for use of these pci
> > functions and display a warning when it is the case.
> > 
> 
> I guess Joe Perches did sent some comments for this one, did you address
> them?


> Reviewed-by: Peter Senna Tschudin 
> > Signed-off-by: Romain Perier 
> > ---
> >  scripts/checkpatch.pl | 9 -
> >  1 file changed, 8 insertions(+), 1 deletion(-)
> > 
> > diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
> > index baa3c7b..f2c775c 100755
> > --- a/scripts/checkpatch.pl
> > +++ b/scripts/checkpatch.pl
> > @@ -6064,7 +6064,14 @@ sub process {
> > WARN("USE_DEVICE_INITCALL",
> >  "please use device_initcall() or more appropriate 
> > function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
> > }
> > -
> > +# check for old PCI api pci_pool_*(), use dma_pool_*() instead
> > +   if ($line =~ 
> > /\bpci_pool(?:_(?:create|destroy|alloc|zalloc|free)|)\b/) {
> > +   if (WARN("USE_DMA_POOL",
> > +"please use the dma pool api or more 
> > appropriate function instead of the old pci pool api\n" . $herecurr) &&
> > +   $fix) {
> > +   while ($fixed[$fixlinenr] =~ 
> > s/\bpci_pool(_(?:create|destroy|alloc|zalloc|free)|)\b/dma_pool$1/) {}
> > +   }
> > +   }
> >  # check for various structs that are normally const (ops, kgdb, 
> > device_tree)
> > if ($line !~ /\bconst\b/ &&
> > $line =~ /\bstruct\s+($const_structs)\b/) {
> > 

This is nearly identical to the suggestion that I
sent but this is slightly misformatted as it does
not have a leading nor a trailing blank line to
separate the test blocks.

Also, I think none of the patches have reached lkml.

Romain, are you using git-send-email to send these
patches?  Perhaps the patches you send also contain
html which are rejected by the mailing list.



Re: [PATCH v3 12/20] scsi: mpt3sas: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:17PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commits replaces the PCI pool old
> API by the appropriated function with the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/scsi/mpt3sas/mpt3sas_base.c | 73 
> +
>  1 file changed, 34 insertions(+), 39 deletions(-)
> 
> diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c 
> b/drivers/scsi/mpt3sas/mpt3sas_base.c
> index 5b7aec5..5ae1c23 100644
> --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
> +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
> @@ -3200,9 +3200,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
>   }
>  
>   if (ioc->sense) {
> - pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
> - if (ioc->sense_dma_pool)
> - pci_pool_destroy(ioc->sense_dma_pool);
> + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
> + dma_pool_destroy(ioc->sense_dma_pool);
>   dexitprintk(ioc, pr_info(MPT3SAS_FMT
>   "sense_pool(0x%p): free\n",
>   ioc->name, ioc->sense));
> @@ -3210,9 +3209,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
>   }
>  
>   if (ioc->reply) {
> - pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
> - if (ioc->reply_dma_pool)
> - pci_pool_destroy(ioc->reply_dma_pool);
> + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
> + dma_pool_destroy(ioc->reply_dma_pool);
>   dexitprintk(ioc, pr_info(MPT3SAS_FMT
>   "reply_pool(0x%p): free\n",
>   ioc->name, ioc->reply));
> @@ -3220,10 +3218,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
>   }
>  
>   if (ioc->reply_free) {
> - pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
> + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
>   ioc->reply_free_dma);
> - if (ioc->reply_free_dma_pool)
> - pci_pool_destroy(ioc->reply_free_dma_pool);
> + dma_pool_destroy(ioc->reply_free_dma_pool);
>   dexitprintk(ioc, pr_info(MPT3SAS_FMT
>   "reply_free_pool(0x%p): free\n",
>   ioc->name, ioc->reply_free));
> @@ -3234,7 +3231,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
>   do {
>   rps = &ioc->reply_post[i];
>   if (rps->reply_post_free) {
> - pci_pool_free(
> + dma_pool_free(
>   ioc->reply_post_free_dma_pool,
>   rps->reply_post_free,
>   rps->reply_post_free_dma);
> @@ -3246,8 +3243,7 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
>   } while (ioc->rdpq_array_enable &&
>  (++i < ioc->reply_queue_count));
>  
> - if (ioc->reply_post_free_dma_pool)
> - pci_pool_destroy(ioc->reply_post_free_dma_pool);
> + dma_pool_destroy(ioc->reply_post_free_dma_pool);
>   kfree(ioc->reply_post);
>   }
>  
> @@ -3268,12 +3264,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER 
> *ioc)
>   if (ioc->chain_lookup) {
>   for (i = 0; i < ioc->chain_depth; i++) {
>   if (ioc->chain_lookup[i].chain_buffer)
> - pci_pool_free(ioc->chain_dma_pool,
> + dma_pool_free(ioc->chain_dma_pool,
>   ioc->chain_lookup[i].chain_buffer,
>   ioc->chain_lookup[i].chain_buffer_dma);
>   }
> - if (ioc->chain_dma_pool)
> - pci_pool_destroy(ioc->chain_dma_pool);
> + dma_pool_destroy(ioc->chain_dma_pool);
>   free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
>   ioc->chain_lookup = NULL;
>   }
> @@ -3448,23 +3443,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER 
> *ioc)
>   ioc->name);
>   goto out;
>   }
> - ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
> - ioc->pdev, sz, 16, 0);
> + ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
> + &ioc->pdev->dev, sz, 16, 0);
>   if (!ioc->reply_post_free_dma_pool) {
>   pr_err(MPT3SAS_FMT
> -  "reply_post_free pool: pci_pool_create failed\n",
> +  "reply_post_free pool: dma_pool_create failed\n",
>ioc->name);
>   goto out;
>   }
>   i = 0;
>   do {
>   

Re: [PATCH v3 11/20] scsi: megaraid: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:16PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commits replaces the PCI pool old
> API by the appropriated function with the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/scsi/megaraid/megaraid_mbox.c   | 33 +++
>  drivers/scsi/megaraid/megaraid_mm.c | 32 +++---
>  drivers/scsi/megaraid/megaraid_sas_base.c   | 29 +++--
>  drivers/scsi/megaraid/megaraid_sas_fusion.c | 66 
> +
>  4 files changed, 77 insertions(+), 83 deletions(-)
> 
> diff --git a/drivers/scsi/megaraid/megaraid_mbox.c 
> b/drivers/scsi/megaraid/megaraid_mbox.c
> index f0987f2..7dfc2e2 100644
> --- a/drivers/scsi/megaraid/megaraid_mbox.c
> +++ b/drivers/scsi/megaraid/megaraid_mbox.c
> @@ -1153,8 +1153,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>  
>  
>   // Allocate memory for 16-bytes aligned mailboxes
> - raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
> - adapter->pdev,
> + raid_dev->mbox_pool_handle = dma_pool_create("megaraid mbox pool",
> + &adapter->pdev->dev,
>   sizeof(mbox64_t) + 16,
>   16, 0);
>  
> @@ -1164,7 +1164,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>  
>   mbox_pci_blk = raid_dev->mbox_pool;
>   for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
> - mbox_pci_blk[i].vaddr = pci_pool_alloc(
> + mbox_pci_blk[i].vaddr = dma_pool_alloc(
>   raid_dev->mbox_pool_handle,
>   GFP_KERNEL,
>   &mbox_pci_blk[i].dma_addr);
> @@ -1181,8 +1181,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>* share common memory pool. Passthru structures piggyback on memory
>* allocted to extended passthru since passthru is smaller of the two
>*/
> - raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
> - adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
> + raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
> + &adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
>  
>   if (raid_dev->epthru_pool_handle == NULL) {
>   goto fail_setup_dma_pool;
> @@ -1190,7 +1190,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>  
>   epthru_pci_blk = raid_dev->epthru_pool;
>   for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
> - epthru_pci_blk[i].vaddr = pci_pool_alloc(
> + epthru_pci_blk[i].vaddr = dma_pool_alloc(
>   raid_dev->epthru_pool_handle,
>   GFP_KERNEL,
>   &epthru_pci_blk[i].dma_addr);
> @@ -1202,8 +1202,8 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>  
>   // Allocate memory for each scatter-gather list. Request for 512 bytes
>   // alignment for each sg list
> - raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
> - adapter->pdev,
> + raid_dev->sg_pool_handle = dma_pool_create("megaraid mbox sg",
> + &adapter->pdev->dev,
>   sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
>   512, 0);
>  
> @@ -1213,7 +1213,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
>  
>   sg_pci_blk = raid_dev->sg_pool;
>   for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
> - sg_pci_blk[i].vaddr = pci_pool_alloc(
> + sg_pci_blk[i].vaddr = dma_pool_alloc(
>   raid_dev->sg_pool_handle,
>   GFP_KERNEL,
>   &sg_pci_blk[i].dma_addr);
> @@ -1249,29 +1249,26 @@ megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
>  
>   sg_pci_blk = raid_dev->sg_pool;
>   for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
> - pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
> + dma_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
>   sg_pci_blk[i].dma_addr);
>   }
> - if (raid_dev->sg_pool_handle)
> - pci_pool_destroy(raid_dev->sg_pool_handle);
> + dma_pool_destroy(raid_dev->sg_pool_handle);
>  
>  
>   epthru_pci_blk = raid_dev->epthru_pool;
>   for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
> - pci_pool_free(raid_dev->epthru_pool_handle,
> + dma_pool_free(raid_dev->epthru_pool_handle,
>   epthru_pci_blk[i].vaddr, 

Re: [PATCH v3 17/20] usb: gadget: pch_udc: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:22PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commits replaces the PCI pool old
> API by the appropriated function with the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/usb/gadget/udc/pch_udc.c | 31 +++
>  1 file changed, 15 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/usb/gadget/udc/pch_udc.c 
> b/drivers/usb/gadget/udc/pch_udc.c
> index a97da64..84dcbcd 100644
> --- a/drivers/usb/gadget/udc/pch_udc.c
> +++ b/drivers/usb/gadget/udc/pch_udc.c
> @@ -355,8 +355,8 @@ struct pch_udc_dev {
>   vbus_session:1,
>   set_cfg_not_acked:1,
>   waiting_zlp_ack:1;
> - struct pci_pool *data_requests;
> - struct pci_pool *stp_requests;
> + struct dma_pool *data_requests;
> + struct dma_pool *stp_requests;
>   dma_addr_t  dma_addr;
>   struct usb_ctrlrequest  setup_data;
>   void __iomem*base_addr;
> @@ -1522,7 +1522,7 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev 
> *dev,
>   /* do not free first desc., will be done by free for request */
>   td = phys_to_virt(addr);
>   addr2 = (dma_addr_t)td->next;
> - pci_pool_free(dev->data_requests, td, addr);
> + dma_pool_free(dev->data_requests, td, addr);
>   td->next = 0x00;
>   addr = addr2;
>   }
> @@ -1539,7 +1539,7 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev 
> *dev,
>   *
>   * Return codes:
>   *   0:  success,
> - *   -ENOMEM:pci_pool_alloc invocation fails
> + *   -ENOMEM:dma_pool_alloc invocation fails
>   */
>  static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
>   struct pch_udc_request *req,
> @@ -1565,7 +1565,7 @@ static int pch_udc_create_dma_chain(struct pch_udc_ep 
> *ep,
>   if (bytes <= buf_len)
>   break;
>   last = td;
> - td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
> + td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
>   &dma_addr);
>   if (!td)
>   goto nomem;
> @@ -1770,7 +1770,7 @@ static struct usb_request *pch_udc_alloc_request(struct 
> usb_ep *usbep,
>   if (!ep->dev->dma_addr)
>   return &req->req;
>   /* ep0 in requests are allocated from data pool here */
> - dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
> + dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
> &req->td_data_phys);
>   if (NULL == dma_desc) {
>   kfree(req);
> @@ -1809,7 +1809,7 @@ static void pch_udc_free_request(struct usb_ep *usbep,
>   if (req->td_data != NULL) {
>   if (req->chain_len > 1)
>   pch_udc_free_dma_chain(ep->dev, req);
> - pci_pool_free(ep->dev->data_requests, req->td_data,
> + dma_pool_free(ep->dev->data_requests, req->td_data,
> req->td_data_phys);
>   }
>   kfree(req);
> @@ -2914,7 +2914,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
>   void*ep0out_buf;
>  
>   /* DMA setup */
> - dev->data_requests = pci_pool_create("data_requests", dev->pdev,
> + dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
>   sizeof(struct pch_udc_data_dma_desc), 0, 0);
>   if (!dev->data_requests) {
>   dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
> @@ -2923,7 +2923,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
>   }
>  
>   /* dma desc for setup data */
> - dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
> + dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
>   sizeof(struct pch_udc_stp_dma_desc), 0, 0);
>   if (!dev->stp_requests) {
>   dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
> @@ -2931,7 +2931,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
>   return -ENOMEM;
>   }
>   /* setup */
> - td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
> + td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
>   &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
>   if (!td_stp) {
>   dev_err(&dev->pdev->dev,
> @@ -2941,7 +2941,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
>   dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
>  
>   /* data: 0 packets !? */
> - td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
> + td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
>   &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
>  

Re: [PATCH v3 13/20] scsi: mvsas: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:18PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commits replaces the PCI pool old
> API by the appropriated function with the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/scsi/mvsas/mv_init.c | 6 +++---
>  drivers/scsi/mvsas/mv_sas.c  | 6 +++---
>  2 files changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
> index 8280046..41d2276 100644
> --- a/drivers/scsi/mvsas/mv_init.c
> +++ b/drivers/scsi/mvsas/mv_init.c
> @@ -125,8 +125,7 @@ static void mvs_free(struct mvs_info *mvi)
>   else
>   slot_nr = MVS_CHIP_SLOT_SZ;
>  
> - if (mvi->dma_pool)
> - pci_pool_destroy(mvi->dma_pool);
> + dma_pool_destroy(mvi->dma_pool);
>  
>   if (mvi->tx)
>   dma_free_coherent(mvi->dev,
> @@ -296,7 +295,8 @@ static int mvs_alloc(struct mvs_info *mvi, struct 
> Scsi_Host *shost)
>   goto err_out;
>  
>   sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
> - mvi->dma_pool = pci_pool_create(pool_name, mvi->pdev, MVS_SLOT_BUF_SZ, 
> 16, 0);
> + mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
> + MVS_SLOT_BUF_SZ, 16, 0);
>   if (!mvi->dma_pool) {
>   printk(KERN_DEBUG "failed to create dma pool %s.\n", 
> pool_name);
>   goto err_out;
> diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
> index c7cc803..ee81d10 100644
> --- a/drivers/scsi/mvsas/mv_sas.c
> +++ b/drivers/scsi/mvsas/mv_sas.c
> @@ -790,7 +790,7 @@ static int mvs_task_prep(struct sas_task *task, struct 
> mvs_info *mvi, int is_tmf
>   slot->n_elem = n_elem;
>   slot->slot_tag = tag;
>  
> - slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
> + slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
>   if (!slot->buf) {
>   rc = -ENOMEM;
>   goto err_out_tag;
> @@ -840,7 +840,7 @@ static int mvs_task_prep(struct sas_task *task, struct 
> mvs_info *mvi, int is_tmf
>   return rc;
>  
>  err_out_slot_buf:
> - pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
> + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
>  err_out_tag:
>   mvs_tag_free(mvi, tag);
>  err_out:
> @@ -918,7 +918,7 @@ static void mvs_slot_task_free(struct mvs_info *mvi, 
> struct sas_task *task,
>   }
>  
>   if (slot->buf) {
> - pci_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
> + dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
>   slot->buf = NULL;
>   }
>   list_del_init(>entry);
> -- 
> 2.9.3
> 


Re: [PATCH v3 10/20] scsi: lpfc: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:15PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commits replaces the PCI pool old
> API by the appropriated function with the DMA pool API. It also updates
> some comments, accordingly.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/scsi/lpfc/lpfc.h   |  12 ++---
>  drivers/scsi/lpfc/lpfc_init.c  |  16 +++
>  drivers/scsi/lpfc/lpfc_mem.c   | 105 
> -
>  drivers/scsi/lpfc/lpfc_nvme.c  |   6 +--
>  drivers/scsi/lpfc/lpfc_nvmet.c |   4 +-
>  drivers/scsi/lpfc/lpfc_scsi.c  |  12 ++---
>  6 files changed, 76 insertions(+), 79 deletions(-)
> 
> diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
> index 0bba2e3..29492bc 100644
> --- a/drivers/scsi/lpfc/lpfc.h
> +++ b/drivers/scsi/lpfc/lpfc.h
> @@ -934,12 +934,12 @@ struct lpfc_hba {
>   spinlock_t hbalock;
>  
>   /* pci_mem_pools */
> - struct pci_pool *lpfc_sg_dma_buf_pool;
> - struct pci_pool *lpfc_mbuf_pool;
> - struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
> - struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
> - struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
> - struct pci_pool *txrdy_payload_pool;
> + struct dma_pool *lpfc_sg_dma_buf_pool;
> + struct dma_pool *lpfc_mbuf_pool;
> + struct dma_pool *lpfc_hrb_pool; /* header receive buffer pool */
> + struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
> + struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
> + struct dma_pool *txrdy_payload_pool;
>   struct lpfc_dma_pool lpfc_mbuf_safety_pool;
>  
>   mempool_t *mbox_mem_pool;
> diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
> index 0ee429d..b856457 100644
> --- a/drivers/scsi/lpfc/lpfc_init.c
> +++ b/drivers/scsi/lpfc/lpfc_init.c
> @@ -3151,7 +3151,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
>   list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
>list) {
>   list_del(>list);
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
> sb->dma_handle);
>   kfree(sb);
>   phba->total_scsi_bufs--;
> @@ -3162,7 +3162,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
>   list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
>list) {
> - list_del(&sb->list);
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
> sb->dma_handle);
>   kfree(sb);
>   phba->total_scsi_bufs--;
> @@ -3193,7 +3193,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
>   list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
>    &phba->lpfc_nvme_buf_list_put, list) {
> - list_del(&lpfc_ncmd->list);
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
> lpfc_ncmd->dma_handle);
>   kfree(lpfc_ncmd);
>   phba->total_nvme_bufs--;
> @@ -3204,7 +3204,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
>   list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
>    &phba->lpfc_nvme_buf_list_get, list) {
> - list_del(&lpfc_ncmd->list);
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
> lpfc_ncmd->dma_handle);
>   kfree(lpfc_ncmd);
>   phba->total_nvme_bufs--;
> @@ -3517,7 +3517,7 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
>   list_remove_head(&scsi_sgl_list, psb,
>struct lpfc_scsi_buf, list);
>   if (psb) {
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool,
> psb->data, psb->dma_handle);
>   kfree(psb);
>   }
> @@ -3614,7 +3614,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
>   list_remove_head(&nvme_sgl_list, lpfc_ncmd,
>struct lpfc_nvme_buf, list);
>   if (lpfc_ncmd) {
> - pci_pool_free(phba->lpfc_sg_dma_buf_pool,
> + dma_pool_free(phba->lpfc_sg_dma_buf_pool,
> lpfc_ncmd->data,
> lpfc_ncmd->dma_handle);
>   

Re: [PATCH v3 19/20] PCI: Remove PCI pool macro functions

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:24PM +0100, Romain Perier wrote:
> Now that all the drivers use the DMA pool API, we can remove the macro
> functions for the PCI pool.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  include/linux/pci.h | 9 -
>  1 file changed, 9 deletions(-)
> 
> diff --git a/include/linux/pci.h b/include/linux/pci.h
> index 282ed32..d206ba4 100644
> --- a/include/linux/pci.h
> +++ b/include/linux/pci.h
> @@ -1281,15 +1281,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool 
> decode,
>  #include 
>  #include 
>  
> -#define  pci_pool dma_pool
> -#define pci_pool_create(name, pdev, size, align, allocation) \
> - dma_pool_create(name, >dev, size, align, allocation)
> -#define  pci_pool_destroy(pool) dma_pool_destroy(pool)
> -#define  pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, 
> handle)
> -#define  pci_pool_zalloc(pool, flags, handle) \
> - dma_pool_zalloc(pool, flags, handle)
> -#define  pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, 
> addr)
> -
>  struct msix_entry {
>   u32 vector; /* kernel uses to write allocated vector */
>   u16 entry;  /* driver uses to specify entry, OS writes */
> -- 
> 2.9.3
> 


Re: [PATCH v3 20/20] checkpatch: warn for use of old PCI pool API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:25PM +0100, Romain Perier wrote:
> pci_pool_*() functions should be replaced by the corresponding functions
> in the DMA pool API. This adds a check for uses of these PCI functions
> and displays a warning when that is the case.
>
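
For reference, a minimal hypothetical call site (identifiers invented here,
not taken from any driver in this series): the first statement is what the
new USE_DMA_POOL check would warn about, and the second is what the --fix
path's pci_pool -> dma_pool substitution would turn it into.

	/* Before: old PCI pool API, flagged by the new checkpatch rule. */
	desc = pci_pool_alloc(card->desc_pool, GFP_ATOMIC, &desc_dma);

	/* After checkpatch.pl --fix rewrites pci_pool_* to dma_pool_*. */
	desc = dma_pool_alloc(card->desc_pool, GFP_ATOMIC, &desc_dma);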

I guess Joe Perches did send some comments on this one; did you address
them?

Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  scripts/checkpatch.pl | 9 -
>  1 file changed, 8 insertions(+), 1 deletion(-)
> 
> diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
> index baa3c7b..f2c775c 100755
> --- a/scripts/checkpatch.pl
> +++ b/scripts/checkpatch.pl
> @@ -6064,7 +6064,14 @@ sub process {
>   WARN("USE_DEVICE_INITCALL",
>"please use device_initcall() or more appropriate 
> function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
>   }
> -
> +# check for old PCI api pci_pool_*(), use dma_pool_*() instead
> + if ($line =~ 
> /\bpci_pool(?:_(?:create|destroy|alloc|zalloc|free)|)\b/) {
> + if (WARN("USE_DMA_POOL",
> +  "please use the dma pool api or more 
> appropriate function instead of the old pci pool api\n" . $herecurr) &&
> + $fix) {
> + while ($fixed[$fixlinenr] =~ 
> s/\bpci_pool(_(?:create|destroy|alloc|zalloc|free)|)\b/dma_pool$1/) {}
> + }
> + }
>  # check for various structs that are normally const (ops, kgdb, device_tree)
>   if ($line !~ /\bconst\b/ &&
>   $line =~ /\bstruct\s+($const_structs)\b/) {
> -- 
> 2.9.3
> 


Re: [PATCH v3 18/20] usb: host: Remove remaining pci_pool in comments

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:23PM +0100, Romain Perier wrote:
> This replaces the remaining occurrences of pci_pool with dma_pool, as
> this is the new API that should be used for that purpose.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/usb/host/ehci-hcd.c | 2 +-
>  drivers/usb/host/fotg210-hcd.c  | 2 +-
>  drivers/usb/host/oxu210hp-hcd.c | 2 +-
>  3 files changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
> index ac2c4ea..6e834b83 100644
> --- a/drivers/usb/host/ehci-hcd.c
> +++ b/drivers/usb/host/ehci-hcd.c
> @@ -597,7 +597,7 @@ static int ehci_run (struct usb_hcd *hcd)
>   /*
>* hcc_params controls whether ehci->regs->segment must (!!!)
>* be used; it constrains QH/ITD/SITD and QTD locations.
> -  * pci_pool consistent memory always uses segment zero.
> +  * dma_pool consistent memory always uses segment zero.
>* streaming mappings for I/O buffers, like pci_map_single(),
>* can return segments above 4GB, if the device allows.
>*
> diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
> index 1c5b34b..ced08dc 100644
> --- a/drivers/usb/host/fotg210-hcd.c
> +++ b/drivers/usb/host/fotg210-hcd.c
> @@ -5047,7 +5047,7 @@ static int fotg210_run(struct usb_hcd *hcd)
>   /*
>* hcc_params controls whether fotg210->regs->segment must (!!!)
>* be used; it constrains QH/ITD/SITD and QTD locations.
> -  * pci_pool consistent memory always uses segment zero.
> +  * dma_pool consistent memory always uses segment zero.
>* streaming mappings for I/O buffers, like pci_map_single(),
>* can return segments above 4GB, if the device allows.
>*
> diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
> index bcf531c..ed20fb3 100644
> --- a/drivers/usb/host/oxu210hp-hcd.c
> +++ b/drivers/usb/host/oxu210hp-hcd.c
> @@ -2708,7 +2708,7 @@ static int oxu_run(struct usb_hcd *hcd)
>  
>   /* hcc_params controls whether oxu->regs->segment must (!!!)
>* be used; it constrains QH/ITD/SITD and QTD locations.
> -  * pci_pool consistent memory always uses segment zero.
> +  * dma_pool consistent memory always uses segment zero.
>* streaming mappings for I/O buffers, like pci_map_single(),
>* can return segments above 4GB, if the device allows.
>*
> -- 
> 2.9.3
> 


Re: [PATCH v3 07/20] wireless: ipw2200: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:12PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commit replaces the old PCI pool
> API with the appropriate functions from the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/net/wireless/intel/ipw2x00/ipw2200.c | 13 +++--
>  1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c 
> b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
> index 5ef3c5c..93dfe47 100644
> --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
> +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
> @@ -3211,7 +3211,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 
> * data, size_t len)
>   struct fw_chunk *chunk;
>   int total_nr = 0;
>   int i;
> - struct pci_pool *pool;
> + struct dma_pool *pool;
>   void **virts;
>   dma_addr_t *phys;
>  
> @@ -3228,9 +3228,10 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 
> * data, size_t len)
>   kfree(virts);
>   return -ENOMEM;
>   }
> - pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
> + pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
> +0);
>   if (!pool) {
> - IPW_ERROR("pci_pool_create failed\n");
> + IPW_ERROR("dma_pool_create failed\n");
>   kfree(phys);
>   kfree(virts);
>   return -ENOMEM;
> @@ -3255,7 +3256,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 
> * data, size_t len)
>  
>   nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
>   for (i = 0; i < nr; i++) {
> - virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
> + virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
>   &phys[total_nr]);
>   if (!virts[total_nr]) {
>   ret = -ENOMEM;
> @@ -3299,9 +3300,9 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 
> * data, size_t len)
>   }
>   out:
>   for (i = 0; i < total_nr; i++)
> - pci_pool_free(pool, virts[i], phys[i]);
> + dma_pool_free(pool, virts[i], phys[i]);
>  
> - pci_pool_destroy(pool);
> + dma_pool_destroy(pool);
>   kfree(phys);
>   kfree(virts);
>  
> -- 
> 2.9.3
> 


Re: [PATCH v3 00/20] Replace PCI pool by DMA pool API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:05PM +0100, Romain Perier wrote:
> The current PCI pool API is a set of simple macro functions that expand
> directly to the corresponding DMA pool functions. The prototypes are
> almost the same and, semantically, they are very similar. I propose to
> use the DMA pool API directly and get rid of the old API.
> 
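
To make the one-to-one mapping concrete, here is a minimal, hypothetical
conversion sketch (the foo_* names and the size/align values are invented
for illustration, not taken from this series); the only real difference is
that dma_pool_create() takes a struct device *, so a pci_dev is passed as
&pdev->dev:

	#include <linux/pci.h>
	#include <linux/dmapool.h>

	struct foo_dev {
		struct dma_pool *buf_pool;	/* was: struct pci_pool *buf_pool; */
		void *buf;
		dma_addr_t buf_dma;
	};

	static int foo_setup(struct pci_dev *pdev, struct foo_dev *foo)
	{
		/* was: pci_pool_create("foo", pdev, 512, 8, 0); */
		foo->buf_pool = dma_pool_create("foo", &pdev->dev, 512, 8, 0);
		if (!foo->buf_pool)
			return -ENOMEM;

		/* was: pci_pool_zalloc(foo->buf_pool, GFP_KERNEL, &foo->buf_dma); */
		foo->buf = dma_pool_zalloc(foo->buf_pool, GFP_KERNEL, &foo->buf_dma);
		if (!foo->buf) {
			dma_pool_destroy(foo->buf_pool);
			return -ENOMEM;
		}
		return 0;
	}

	static void foo_teardown(struct foo_dev *foo)
	{
		/* was: pci_pool_free() and pci_pool_destroy() */
		dma_pool_free(foo->buf_pool, foo->buf, foo->buf_dma);
		dma_pool_destroy(foo->buf_pool);
	}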
> This set of patches replaces the old API with the DMA pool API, adds
> support to warn about the old API in checkpatch.pl, and removes the
> defines.
> 
> Changes in v3:
> - Rebased series onto next-20170224
> - Fix checkpatch.pl reports for patch 11/20 and patch 12/20
> - Remove prefix RFC
> Changes in v2:
> - Introduced patch 18/20
> - Fixed cosmetic issues: spaces before braces, lines over 80 characters
> - Removed some of the checks for NULL pointers before calling dma_pool_destroy
> - Improved the regexp in checkpatch for pci_pool, thanks to Joe Perches
> - Added Tested-by and Acked-by tags

Tested the series with checkpatch and compiling with allyesconfig.

> 
> Romain Perier (20):
>   block: DAC960: Replace PCI pool old API
>   dmaengine: pch_dma: Replace PCI pool old API
>   IB/mthca: Replace PCI pool old API
>   net: e100: Replace PCI pool old API
>   mlx4: Replace PCI pool old API
>   mlx5: Replace PCI pool old API
>   wireless: ipw2200: Replace PCI pool old API
>   scsi: be2iscsi: Replace PCI pool old API
>   scsi: csiostor: Replace PCI pool old API
>   scsi: lpfc: Replace PCI pool old API
>   scsi: megaraid: Replace PCI pool old API
>   scsi: mpt3sas: Replace PCI pool old API
>   scsi: mvsas: Replace PCI pool old API
>   scsi: pmcraid: Replace PCI pool old API
>   usb: gadget: amd5536udc: Replace PCI pool old API
>   usb: gadget: net2280: Replace PCI pool old API
>   usb: gadget: pch_udc: Replace PCI pool old API
>   usb: host: Remove remaining pci_pool in comments
>   PCI: Remove PCI pool macro functions
>   checkpatch: warn for use of old PCI pool API
> 
>  drivers/block/DAC960.c|  36 -
>  drivers/block/DAC960.h|   4 +-
>  drivers/dma/pch_dma.c |  12 +--
>  drivers/infiniband/hw/mthca/mthca_av.c|  10 +--
>  drivers/infiniband/hw/mthca/mthca_cmd.c   |   8 +-
>  drivers/infiniband/hw/mthca/mthca_dev.h   |   4 +-
>  drivers/net/ethernet/intel/e100.c |  12 +--
>  drivers/net/ethernet/mellanox/mlx4/cmd.c  |  10 +--
>  drivers/net/ethernet/mellanox/mlx4/mlx4.h |   2 +-
>  drivers/net/ethernet/mellanox/mlx5/core/cmd.c |  11 +--
>  drivers/net/wireless/intel/ipw2x00/ipw2200.c  |  13 ++--
>  drivers/scsi/be2iscsi/be_iscsi.c  |   6 +-
>  drivers/scsi/be2iscsi/be_main.c   |   6 +-
>  drivers/scsi/be2iscsi/be_main.h   |   2 +-
>  drivers/scsi/csiostor/csio_hw.h   |   2 +-
>  drivers/scsi/csiostor/csio_init.c |  11 +--
>  drivers/scsi/csiostor/csio_scsi.c |   6 +-
>  drivers/scsi/lpfc/lpfc.h  |  12 +--
>  drivers/scsi/lpfc/lpfc_init.c |  16 ++--
>  drivers/scsi/lpfc/lpfc_mem.c  | 105 
> +-
>  drivers/scsi/lpfc/lpfc_nvme.c |   6 +-
>  drivers/scsi/lpfc/lpfc_nvmet.c|   4 +-
>  drivers/scsi/lpfc/lpfc_scsi.c |  12 +--
>  drivers/scsi/megaraid/megaraid_mbox.c |  33 
>  drivers/scsi/megaraid/megaraid_mm.c   |  32 
>  drivers/scsi/megaraid/megaraid_sas_base.c |  29 +++
>  drivers/scsi/megaraid/megaraid_sas_fusion.c   |  66 
>  drivers/scsi/mpt3sas/mpt3sas_base.c   |  73 +-
>  drivers/scsi/mvsas/mv_init.c  |   6 +-
>  drivers/scsi/mvsas/mv_sas.c   |   6 +-
>  drivers/scsi/pmcraid.c|  10 +--
>  drivers/scsi/pmcraid.h|   2 +-
>  drivers/usb/gadget/udc/amd5536udc.c   |   8 +-
>  drivers/usb/gadget/udc/amd5536udc.h   |   4 +-
>  drivers/usb/gadget/udc/net2280.c  |  12 +--
>  drivers/usb/gadget/udc/net2280.h  |   2 +-
>  drivers/usb/gadget/udc/pch_udc.c  |  31 
>  drivers/usb/host/ehci-hcd.c   |   2 +-
>  drivers/usb/host/fotg210-hcd.c|   2 +-
>  drivers/usb/host/oxu210hp-hcd.c   |   2 +-
>  include/linux/mlx5/driver.h   |   2 +-
>  include/linux/pci.h   |   9 ---
>  scripts/checkpatch.pl |   9 ++-
>  43 files changed, 318 insertions(+), 332 deletions(-)
> 
> -- 
> 2.9.3
> 


Re: [PATCH v3 06/20] mlx5: Replace PCI pool old API

2017-02-27 Thread Peter Senna Tschudin
On Sun, Feb 26, 2017 at 08:24:11PM +0100, Romain Perier wrote:
> The PCI pool API is deprecated. This commit replaces the old PCI pool
> API with the appropriate functions from the DMA pool API.
> 
Reviewed-by: Peter Senna Tschudin 
> Signed-off-by: Romain Perier 
> ---
>  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 11 ++-
>  include/linux/mlx5/driver.h   |  2 +-
>  2 files changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c 
> b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
> index caa837e..6eef344 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
> @@ -1061,7 +1061,7 @@ static struct mlx5_cmd_mailbox *alloc_cmd_box(struct 
> mlx5_core_dev *dev,
>   if (!mailbox)
>   return ERR_PTR(-ENOMEM);
>  
> - mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
> + mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
>   &mailbox->dma);
>   if (!mailbox->buf) {
>   mlx5_core_dbg(dev, "failed allocation\n");
> @@ -1076,7 +1076,7 @@ static struct mlx5_cmd_mailbox *alloc_cmd_box(struct 
> mlx5_core_dev *dev,
>  static void free_cmd_box(struct mlx5_core_dev *dev,
>struct mlx5_cmd_mailbox *mailbox)
>  {
> - pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
> + dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
>   kfree(mailbox);
>  }
>  
> @@ -1696,7 +1696,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
>   return -EINVAL;
>   }
>  
> - cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
> + cmd->pool = dma_pool_create("mlx5_cmd", >pdev->dev, size, align,
> + 0);
>   if (!cmd->pool)
>   return -ENOMEM;
>  
> @@ -1786,7 +1787,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
>   free_cmd_page(dev, cmd);
>  
>  err_free_pool:
> - pci_pool_destroy(cmd->pool);
> + dma_pool_destroy(cmd->pool);
>  
>   return err;
>  }
> @@ -1800,6 +1801,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
>   destroy_workqueue(cmd->wq);
>   destroy_msg_cache(dev);
>   free_cmd_page(dev, cmd);
> - pci_pool_destroy(cmd->pool);
> + dma_pool_destroy(cmd->pool);
>  }
>  EXPORT_SYMBOL(mlx5_cmd_cleanup);
> diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
> index 2fcff6b..13a267c 100644
> --- a/include/linux/mlx5/driver.h
> +++ b/include/linux/mlx5/driver.h
> @@ -284,7 +284,7 @@ struct mlx5_cmd {
>   struct semaphore pages_sem;
>   int mode;
>   struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
> - struct pci_pool *pool;
> + struct dma_pool *pool;
>   struct mlx5_cmd_debug dbg;
>   struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
>   int checksum_disabled;
> -- 
> 2.9.3
> 


Re: Kernel crash with 4.10 trying to remove scsi disks

2017-02-27 Thread Johannes Thumshirn
On 02/24/2017 04:07 PM, Farhan Ali wrote:
> Hello,
> 
> I have noticed a kernel crash with the 4.10 kernel in our s390 environment,
> running a test trying to remove scsi disks. Here is a snippet of the
> kernel crash message:

Hi Farhan,

Is this crash newly introduced with 4.10? Do you maybe have a chance to
bisect it?

Thanks,
Johannes
-- 
Johannes Thumshirn  Storage
jthumsh...@suse.de    +49 911 74053 689
SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
GF: Felix Imendörffer, Jane Smithard, Graham Norton
HRB 21284 (AG Nürnberg)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850