Dejan Muhamedagic wrote:
> Hi,
>
> On Wed, Jun 09, 2010 at 10:09:33PM -0400, Miles Fidelman wrote:
>    
>> <upgraded heartbeat2 to heartbeat3+pacemaker on Debian Lenny>
>>
>> All seems to have gone well, and pretty painlessly - everything is up
>> and running, with one remaining issue:
>>
>> As I understand it, after running "cibadmin --upgrade --force" the CIB
>> should be accessible through crm-shell (crm>  configure).
>>
>> However, when I do this, I get the error message:
>> INFO: object crm_config.id2244404 cannot be represented in the CLI notation
>>      
> What a strange id. Do you have such id? Can you show the config?
> In particular this element should've been shown as xml.
>    
ok... here's the xml (#crm show xml >file) - can anybody tell me what to 
make of this?

<?xml version="1.0" ?>
<cib admin_epoch="0" crm_feature_set="3.0.1" 
dc-uuid="14d2d0ed-59d1-4729-a149-d5
421b6a4988" epoch="108" have-quorum="1" num_updates="16" 
remote-tls-port="0" val
idate-with="pacemaker-1.0">
<configuration>
<crm_config>
<cluster_property_set id="crm_config.id2244404">
<nvpair id="nvp.id2244404" name="stonith-enabled" value="false"/>
</cluster_property_set>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="nvpair.id2244410" name="dc-version" value="1.0.8-2c98138c2f0
70fcb6ddeab1084154cffbf44ba75"/>
<nvpair id="nvpair.id2251490" name="is-managed-default" value="true"/>
<nvpair id="nvpair.id2251500" name="cluster-infrastructure" value="Heart
beat"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refre
sh" value="1276174323"/>
</cluster_property_set>
</crm_config>
<rsc_defaults/>
<op_defaults/>
<nodes>
<node id="14d2d0ed-59d1-4729-a149-d5421b6a4988" type="normal" uname="serve
server3:/var/log# cat temp
<?xml version="1.0" ?>
<cib admin_epoch="0" crm_feature_set="3.0.1" 
dc-uuid="14d2d0ed-59d1-4729-a149-d5421b6a4988" epoch="108" 
have-quorum="1" num_updates="16" remote-tls-port="0" 
validate-with="pacemaker-1.0">
<configuration>
<crm_config>
<cluster_property_set id="crm_config.id2244404">
<nvpair id="nvp.id2244404" name="stonith-enabled" value="false"/>
</cluster_property_set>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="nvpair.id2244410" name="dc-version" 
value="1.0.8-2c98138c2f070fcb6ddeab1084154cffbf44ba75"/>
<nvpair id="nvpair.id2251490" name="is-managed-default" value="true"/>
<nvpair id="nvpair.id2251500" name="cluster-infrastructure" 
value="Heartbeat"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" 
name="last-lrm-refresh" value="1276174323"/>
</cluster_property_set>
</crm_config>
<rsc_defaults/>
<op_defaults/>
<nodes>
<node id="14d2d0ed-59d1-4729-a149-d5421b6a4988" type="normal" 
uname="server2">
<instance_attributes id="instance_attributes.id2251520">
<nvpair id="nvpair.id2251526" name="standby" value="on"/>
</instance_attributes>
</node>
<node id="0444a71c-c1b0-40c2-83e5-c37005379450" type="normal" 
uname="server3">
<instance_attributes id="instance_attributes.id2251783">
<nvpair id="nvpair.id2251789" name="standby" value="false"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive class="ocf" id="production2" provider="heartbeat" type="Xen">
<meta_attributes id="primitive-production2.meta">
<nvpair id="nvpair.meta.auto-31" name="target-role" value="started"/>
</meta_attributes>
<operations>
<op id="production2-op-01" interval="10s" name="monitor" 
requires="nothing" timeout="60s"/>
<op id="production2-op-02" interval="0" name="start" start-delay="0" 
timeout="60s"/>
<op id="production2-op-03" interval="0" name="stop" timeout="300s"/>
</operations>
<instance_attributes id="instance_attributes.id2244127">
<nvpair id="nvpair.id2244132" name="xmfile" 
value="/etc/xen/production2.cfg"/>
</instance_attributes>
<instance_attributes id="instance_attributes.id2244147">
<nvpair id="nvpair.id2244152" name="target_role" value="started"/>
</instance_attributes>
</primitive>
<primitive class="ocf" id="server1" provider="heartbeat" type="Xen">
<meta_attributes id="primitive-server1.meta">
<nvpair id="primitive-server1.meta-target-role" name="target-role" 
value="Started"/>
</meta_attributes>
<operations>
<op id="server1-op-01" interval="10s" name="monitor" requires="nothing" 
timeout="60s"/>
<op id="server1-op-02" interval="0" name="start" start-delay="0" 
timeout="60s"/>
<op id="server1-op-03" interval="0" name="stop" timeout="300s"/>
</operations>
<instance_attributes id="instance_attributes.id2252133">
<nvpair id="nvpair.id2252138" name="xmfile" 
value="/etc/xen/newserver1.cfg"/>
</instance_attributes>
<meta_attributes id="meta_attributes.id2252147">
<nvpair id="meta_attributes.id2252147-target-role" name="target-role" 
value="Started"/>
</meta_attributes>
</primitive>
</resources>
<constraints>
<rsc_location id="cli-prefer-production2" rsc="production2">
<rule id="cli-prefer-rule-production2" score="INFINITY">
<expression attribute="#uname" id="expression.id2251620" operation="eq" 
value="server3"/>
</rule>
</rsc_location>
<rsc_location id="cli-prefer-server1" rsc="server1">
<rule id="cli-prefer-rule-server1" score="INFINITY">
<expression attribute="#uname" id="expression.id2251648" operation="eq" 
value="server2"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>

Thanks,

Miles

-- 
In theory, there is no difference between theory and practice.
In practice, there is.   .... Yogi Berra


_______________________________________________
Linux-HA mailing list
[email protected]
http://lists.linux-ha.org/mailman/listinfo/linux-ha
See also: http://linux-ha.org/ReportingProblems

Reply via email to