Hi Florian,

I may have misunderstood your earlier reply.

Did your reply mean that the problem would be solved if I explicitly set the
portals parameter?

example)
 primitive prmiSCSITarget ocf:heartbeat:iSCSITarget \
         params iqn="iqn.2001-04.com.example:Test01" tid="1" portals="" \ ---> 
set blank. 
         op start interval="0s" timeout="60s" on-fail="restart" \
         op monitor interval="60s" timeout="60s" on-fail="restart" \
         op stop interval="0s" timeout="60s" on-fail="fence"

If so, then the problem I intended to report is a different one. 

If the issue is that the warning log appears when a user does not set the 
portals parameter, there is one thing I would like to point out.

If the portals parameter is unnecessary, the user will already understand that.

I think that requiring users to set an unnecessary portals parameter to a 
blank value only causes confusion.

Best Regards,
Hideo Yamauchi.


--- [email protected] wrote:

> Hi Florian,
> 
> > > I think these warning log to be good only in probe or validate-all.
> > 
> > I disagree. I think it's actually a good idea to warn users very noisily
> > that they are expecting functionality from the resource that the agent
> > cannot support.
> 
> I agree that warning log appears when a "portals" parameter was appointed in 
> the case of
> iet,tgt. 
> However, I do not appoint the "portals" parameter.
> 
> There seems to be the problem to the default of the "portals" parameter. 
> When a "portals" parameter is not appointed in the case of iet,tgt, 
> iSCSITarget must not set a
> default.
> 
> # Listen on 0.0.0.0:3260 by default
> OCF_RESKEY_portals_default="0.0.0.0:3260"
> : ${OCF_RESKEY_portals=${OCF_RESKEY_portals_default}}
> 
> 
> > 
> > Or do you have a configuration where portals is actually _not_ set on
> > the resource, and it's still complaining? Can I see your "crm configure
> > show" please?
> 
> [r...@srv01 ~]# crm configure show
> INFO: building help index
> INFO: object rsc_defaults-meta_attributes cannot be represented in the CLI 
> notation
> node $id="a5704d58-e5a1-453e-96d0-417026504519" srv02
> node $id="cdf7616d-77a7-408b-aa2c-cb06b9bbc62d" srv01
> primitive clnPrmPingd ocf:pacemaker:pingd \
>         meta migration-threshold="10" \
>         params name="default_ping_set" host_list="192.168.40.1" 
> multiplier="100" dampen="0" \
>         op start interval="0" timeout="60s" on-fail="restart" \
>         op monitor interval="10s" timeout="60s" on-fail="restart" \
>         op stop interval="0" timeout="60s" on-fail="ignore"
> primitive prmDiskd1 ocf:pacemaker:diskd \
>         params name="diskcheck_status_internal" device="/dev/sdc" 
> interval="10" \
>         op start interval="0s" timeout="60s" on-fail="restart" \
>         op monitor interval="10s" timeout="60s" on-fail="restart" \
>         op stop interval="0s" timeout="60s" on-fail="ignore"
> primitive prmDrbd ocf:linbit:drbd \
>         params drbd_resource="r0" \
>         op start interval="0" timeout="240s" on-fail="restart" \
>         op monitor interval="10" role="Master" timeout="60s" 
> on-fail="restart" \
>         op monitor interval="15" role="Slave" timeout="61s" on-fail="restart" 
> \
>         op stop interval="0" timeout="100s" on-fail="fence"
> primitive prmIpiSCSI ocf:heartbeat:IPaddr2 \
>         params ip="192.168.40.77" nic="eth0" cidr_netmask="24" \
>         op start interval="0s" timeout="60s" on-fail="restart" \
>         op monitor interval="10s" timeout="60s" on-fail="restart" \
>         op stop interval="0s" timeout="60s" on-fail="fence"
> primitive prmStonithN1 stonith:external/ssh \
>         params priority="1" stonith-timeout="60s" hostlist="srv01" \
>         op start interval="0" timeout="60s" \
>         op monitor interval="10s" timeout="60s" \
>         op stop interval="0" timeout="60s"
> primitive prmStonithN2 stonith:external/ssh \
>         params priority="1" stonith-timeout="300s" hostlist="srv02" \
>         op start interval="0" timeout="60s" \
>         op monitor interval="10s" timeout="60s" \
>         op stop interval="0" timeout="60s"
> primitive prmiSCSILogicalUnit ocf:heartbeat:iSCSILogicalUnit \
>         params lun="1" path="/dev/drbd0" 
> target_iqn="iqn.2001-04.com.example:Test01" \
>         op start interval="0s" timeout="60s" on-fail="restart" \
>         op monitor interval="60s" timeout="60s" on-fail="restart" \
>         op stop interval="0s" timeout="60s" on-fail="fence"
> primitive prmiSCSITarget ocf:heartbeat:iSCSITarget \
>         params iqn="iqn.2001-04.com.example:Test01" tid="1" \
>         op start interval="0s" timeout="60s" on-fail="restart" \
>         op monitor interval="60s" timeout="60s" on-fail="restart" \
>         op stop interval="0s" timeout="60s" on-fail="fence"
> group grpStonith1 prmStonithN1
> group grpStonith2 prmStonithN2
> group iSCSIgroup01 prmIpiSCSI prmiSCSITarget prmiSCSILogicalUnit
> ms msGroup01 prmDrbd \
>         meta globally-unique="false" master-max="1" master-node-max="1" 
> clone-max="2"
> clone-node-max="1" notify="true"
> clone clnDiskd1 prmDiskd1
> clone clnPingd clnPrmPingd
> location grpStonith1-1-location grpStonith1 \
>         rule $id="rule-Stonith1-1" -inf: #uname eq srv01
> location grpStonith2-1-location grpStonith2 \
>         rule $id="rule-Stonith2-1" -inf: #uname eq srv02
> location location-grp01-1 msGroup01 \
>         rule $id="rule-grp01-1" 200: #uname eq srv01 \
>         rule $id="rule-grp01-4" 100: #uname eq srv02 \
>         rule $id="rule-grp01-2" -inf: not_defined default_ping_set or 
> default_ping_set lt 100 \
>         rule $id="rule-grp01-3" -inf: not_defined diskcheck_status_internal or
> diskcheck_status_internal eq ERROR
> colocation rsc_colocation01 inf: iSCSIgroup01 msGroup01:Master
> colocation rsc_colocation02 inf: msGroup01:Master clnPingd
> colocation rsc_colocation03 inf: msGroup01 clnDiskd1
> order order-1 0: clnDiskd1 msGroup01
> order order-2 0: clnPingd msGroup01
> order order-3 0: msGroup01:promote iSCSIgroup01:start
> property $id="cib-bootstrap-options" \
>         dc-version="1.0.10-b2e39d318fda501e2fcf223c2d039b721f3679a9" \
>         cluster-infrastructure="Heartbeat" \
>         no-quorum-policy="ignore" \
>         stonith-enabled="true" \
>         startup-fencing="false" \
>         stonith-timeout="120s" \
>         dc-deadtime="48s"
> xml <meta_attributes id="rsc_defaults-meta_attributes"> \
>         <nvpair id="rsc-default-meta_attributes-resource-stickiness" 
> name="resource-stickiness"
> value="INFINITY"/> \
>         <nvpair id="rsc-default-meta_attributes-migration-threshold" 
> name="migration-threshold"
> value="1"/> \
> </meta_attributes>
> 
> 
> Best Regards,
> Hideo Yamauchi.
> 
> --- Florian Haas <[email protected]> wrote:
> 
> > On 12/17/2010 07:22 AM, [email protected] wrote:
> > > Hi All,
> > > 
> > > We test the iSCSI environment. 
> > > 
> > > And I confirmed the log of the noise that was output every time when a 
> > > monitor was carried
> out
> > in the
> > > environment that used tgt.
> > > 
> > > Dec 15 16:42:32 srv02 iSCSITarget[5601]: WARNING: Configuration parameter 
> > > "portals" is not
> > supported
> > > by the iSCSI implementation and will be ignored.
> > > Dec 15 16:43:34 srv02 iSCSITarget[6023]: WARNING: Configuration parameter 
> > > "portals" is not
> > supported
> > > by the iSCSI implementation and will be ignored.
> > > Dec 15 16:44:37 srv02 iSCSITarget[6473]: WARNING: Configuration parameter 
> > > "portals" is not
> > supported
> > > by the iSCSI implementation and will be ignored.
> > > Dec 15 16:45:39 srv02 iSCSITarget[6923]: WARNING: Configuration parameter 
> > > "portals" is not
> > supported
> > > by the iSCSI implementation and will be ignored.
> > > Dec 15 16:46:41 srv02 iSCSITarget[7373]: WARNING: Configuration parameter 
> > > "portals" is not
> > supported
> > > by the iSCSI implementation and will be ignored.
> > > 
> > > I think these warning log to be good only in probe or validate-all.
> > 
> > I disagree. I think it's actually a good idea to warn users very noisily
> > that they are expecting functionality from the resource that the agent
> > cannot support.
> > 
> > Or do you have a configuration where portals is actually _not_ set on
> > the resource, and it's still complaining? Can I see your "crm configure
> > show" please?
> > 
> > Florian
> > 
> > > _______________________________________________________
> > Linux-HA-Dev: [email protected]
> > http://lists.linux-ha.org/mailman/listinfo/linux-ha-dev
> > Home Page: http://linux-ha.org/
> > 
> 
> _______________________________________________________
> Linux-HA-Dev: [email protected]
> http://lists.linux-ha.org/mailman/listinfo/linux-ha-dev
> Home Page: http://linux-ha.org/
> 

_______________________________________________________
Linux-HA-Dev: [email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha-dev
Home Page: http://linux-ha.org/

Reply via email to