Re: [Openstack] unexpected distribution of compute instances in queens

2018-11-27 Thread Zufar Dhiyaulhaq
Hi,

Thank you. I was able to fix this issue by adding this configuration to the
nova configuration file on the controller node.

driver=filter_scheduler

Best Regards
Zufar Dhiyaulhaq


On Tue, Nov 27, 2018 at 5:01 PM Zufar Dhiyaulhaq 
wrote:

> Hi Smooney,
> sorry for the late reply. I attached the wrong configuration files. This
> is my nova configuration (with randomization added from your suggestion) from
> the master node (Jinja2-based template).
>
> [DEFAULT]
> osapi_compute_listen = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> metadata_listen = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> enabled_apis = osapi_compute,metadata
> transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
> }}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
> }}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
> my_ip = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> use_neutron = True
> firewall_driver = nova.virt.firewall.NoopFirewallDriver
> [api]
> auth_strategy = keystone
> [api_database]
> connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_api
> [barbican]
> [cache]
> backend=oslo_cache.memcache_pool
> enabled=true
> memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
> }}:11211,{{ controller3_ip_man }}:11211
> [cells]
> [cinder]
> os_region_name = RegionOne
> [compute]
> [conductor]
> [console]
> [consoleauth]
> [cors]
> [crypto]
> [database]
> connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova
> [devices]
> [ephemeral_storage_encryption]
> [filter_scheduler]
> [glance]
> api_servers = http://{{ vip }}:9292
> [guestfs]
> [healthcheck]
> [hyperv]
> [ironic]
> [key_manager]
> [keystone]
> [keystone_authtoken]
> auth_url = http://{{ vip }}:5000/v3
> memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
> }}:11211,{{ controller3_ip_man }}:11211
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> project_name = service
> username = nova
> password = {{ nova_pw }}
> [libvirt]
> [matchmaker_redis]
> [metrics]
> [mks]
> [neutron]
> url = http://{{ vip }}:9696
> auth_url = http://{{ vip }}:35357
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> region_name = RegionOne
> project_name = service
> username = neutron
> password = {{ neutron_pw }}
> service_metadata_proxy = true
> metadata_proxy_shared_secret = {{ metadata_secret }}
> [notifications]
> [osapi_v21]
> [oslo_concurrency]
> lock_path = /var/lib/nova/tmp
> [oslo_messaging_amqp]
> [oslo_messaging_kafka]
> [oslo_messaging_notifications]
> [oslo_messaging_rabbit]
> [oslo_messaging_zmq]
> [oslo_middleware]
> [oslo_policy]
> [pci]
> [placement]
> os_region_name = RegionOne
> project_domain_name = Default
> project_name = service
> auth_type = password
> user_domain_name = Default
> auth_url = http://{{ vip }}:5000/v3
> username = placement
> password = {{ placement_pw }}
> randomize_allocation_candidates = true
> [quota]
> [rdp]
> [remote_debug]
> [scheduler]
> discover_hosts_in_cells_interval = 300
> [serial_console]
> [service_user]
> [spice]
> [upgrade_levels]
> [vault]
> [vendordata_dynamic_auth]
> [vmware]
> [vnc]
> enabled = true
> keymap=en-us
> novncproxy_base_url = https://{{ vip }}:6080/vnc_auto.html
> novncproxy_host = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> [workarounds]
> [wsgi]
> [xenserver]
> [xvp]
> [placement_database]
> connection=mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_placement
>
> Thank you
>
> Best Regards,
> Zufar Dhiyaulhaq
>
>
> On Tue, Nov 27, 2018 at 4:55 PM Zufar Dhiyaulhaq <
> zufardhiyaul...@gmail.com> wrote:
>
>> Hi Smooney,
>>
>> thank you for your help. I am trying to enable randomization, but it is not
>> working. The instances I have created are still on the same node. Below is my
>> nova configuration (with randomization added from your suggestion) from the
>> master node (Jinja2-based template).
>>
>> [DEFAULT]
>> enabled_apis = osapi_compute,metadata
>> transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{
>> controller1_ip_man }}:5672,openstack:{{ rabbitmq_pw }}@{{
>> controller2_ip_man }}:5672,openstack:{{ rabbitmq_pw }}@{{
>> controller3_ip_man }}:5672
>> my_ip = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
>> 'address'] }}
>> use_neutron = True
>> firewall_driver = nova.virt.firewall.NoopFirewallDriver
>> [api]
>> auth_stra

Re: [Openstack] unexpected distribution of compute instances in queens

2018-11-27 Thread Zufar Dhiyaulhaq
Hi Smooney,
sorry for the late reply. I attached the wrong configuration files. This is
my nova configuration (with randomization added from your suggestion) from the
master node (Jinja2-based template).

[DEFAULT]
osapi_compute_listen = {{
hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
metadata_listen = {{
hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
my_ip = {{
hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_api
[barbican]
[cache]
backend=oslo_cache.memcache_pool
enabled=true
memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://{{ vip }}:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://{{ vip }}:5000/v3
memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = {{ nova_pw }}
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://{{ vip }}:9696
auth_url = http://{{ vip }}:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = {{ neutron_pw }}
service_metadata_proxy = true
metadata_proxy_shared_secret = {{ metadata_secret }}
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://{{ vip }}:5000/v3
username = placement
password = {{ placement_pw }}
randomize_allocation_candidates = true
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
keymap=en-us
novncproxy_base_url = https://{{ vip }}:6080/vnc_auto.html
novncproxy_host = {{
hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
[workarounds]
[wsgi]
[xenserver]
[xvp]
[placement_database]
connection=mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_placement

Thank you

Best Regards,
Zufar Dhiyaulhaq


On Tue, Nov 27, 2018 at 4:55 PM Zufar Dhiyaulhaq 
wrote:

> Hi Smooney,
>
> thank you for your help. I am trying to enable randomization, but it is not
> working. The instances I have created are still on the same node. Below is my
> nova configuration (with randomization added from your suggestion) from the
> master node (Jinja2-based template).
>
> [DEFAULT]
> enabled_apis = osapi_compute,metadata
> transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
> }}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
> }}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
> my_ip = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
> 'address'] }}
> use_neutron = True
> firewall_driver = nova.virt.firewall.NoopFirewallDriver
> [api]
> auth_strategy = keystone
> [api_database]
> [barbican]
> [cache]
> backend=oslo_cache.memcache_pool
> enabled=true
> memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
> }}:11211,{{ controller3_ip_man }}:11211
> [cells]
> [cinder]
> [compute]
> [conductor]
> [console]
> [consoleauth]
> [cors]
> [crypto]
> [database]
> [devices]
> [ephemeral_storage_encryption]
> [filter_scheduler]
> [glance]
> api_servers = http://{{ vip }}:9292
> [guestfs]
> [healthcheck]
> [hyperv]
> [ironic]
> [key_manager]
> [keystone]
> [keystone_authtoken]
> auth_url = http://{{ vip }}:5000/v3
> memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
> }}:11211,{{ controller3_ip_man }}:11211
> auth_type = password
> project_domain_name = default
> user_domain_name = default
> project_name = service
> username = nova
> password = {{ nova_pw }}
> [libvirt]
> virt_type = kvm
&g

Re: [Openstack] unexpected distribution of compute instances in queens

2018-11-27 Thread Zufar Dhiyaulhaq
Hi Smooney,

thank you for your help. I am trying to enable randomization, but it is not
working. The instances I have created are still on the same node. Below is my
nova configuration (with randomization added from your suggestion) from the
master node (Jinja2-based template).

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
my_ip = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address']
}}
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
[barbican]
[cache]
backend=oslo_cache.memcache_pool
enabled=true
memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
[cells]
[cinder]
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://{{ vip }}:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://{{ vip }}:5000/v3
memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = {{ nova_pw }}
[libvirt]
virt_type = kvm
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://{{ vip }}:9696
auth_url = http://{{ vip }}:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = {{ neutron_pw }}
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://{{ vip }}:5000/v3
username = placement
password = {{ placement_pw }}
[quota]
[rdp]
[remote_debug]
[scheduler]
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = True
keymap=en-us
server_listen = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
'address'] }}
server_proxyclient_address = {{ hostvars[inventory_hostname][
'ansible_ens3f1']['ipv4']['address'] }}
novncproxy_base_url = https://{{ vip }}:6080/vnc_auto.html
[workarounds]
[wsgi]
[xenserver]
[xvp]

Thank you,

Best Regards,
Zufar Dhiyaulhaq

On Mon, Nov 26, 2018 at 11:13 PM Sean Mooney  wrote:

> On Mon, 2018-11-26 at 17:45 +0700, Zufar Dhiyaulhaq wrote:
> > Hi,
> >
> > I am deploying OpenStack with 3 compute nodes, but I am seeing an
> abnormal distribution of instances: instances are
> > only deployed on a specific compute node, and are not distributed among
> the other compute nodes.
> >
> > this is my nova.conf from the compute node. (template jinja2 based)
>
> hi, the default behavior of nova used to be spread, not pack, and i believe
> it still is.
> the default behavior with placement however is closer to a packing
> behavior, as
> allocation candidates are returned in an undefined but deterministic order.
>
> on a busy cloud this does not strictly pack instances, but on a quiet cloud
> it effectively does.
>
> you can try to enable randomisation of the allocation candidates by
> setting this config option in
> the nova.conf of the scheduler to true.
>
> https://docs.openstack.org/nova/latest/configuration/config.html#placement.randomize_allocation_candidates
>
> on that note, can you provide the nova.conf that the scheduler uses,
> instead of the compute node nova.conf?
> if you have not overridden any of the nova defaults, the ram and cpu weighers
> should spread instances within
> the allocation candidates returned by placement.
>
> >
> > [DEFAULT]
> > osapi_compute_listen = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> > metadata_listen = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> > enabled_apis = osapi_compute,metadata
> > transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{
> controller1_ip_man }}:5672,openstack:{{ rabbitmq_pw }}@{{
> > controller2_ip_man }}:5672,openstack:{{ rabbitmq_pw }}@{{
> controller3_ip_man }}:5672
> > my_ip = {{
> hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address'] }}
> > use_neutron = True
> > firewall_driver = nova.virt.firewall.NoopFirewallDriver
> > [api]
> > auth_strategy = keystone
> > [api_database]
> > connection = mysql+pymysql://nova:{{ nova_dbpw

[Openstack] unexpected distribution of compute instances in queens

2018-11-26 Thread Zufar Dhiyaulhaq
Hi,

I am deploying OpenStack with 3 compute nodes, but I am seeing an abnormal
distribution of instances: instances are only deployed on a specific
compute node, and are not distributed among the other compute nodes.

This is my nova.conf from the compute node (Jinja2-based template).

[DEFAULT]
osapi_compute_listen = {{ hostvars[inventory_hostname]['ansible_ens3f1'][
'ipv4']['address'] }}
metadata_listen = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
'address'] }}
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:{{ rabbitmq_pw }}@{{ controller1_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller2_ip_man
}}:5672,openstack:{{ rabbitmq_pw }}@{{ controller3_ip_man }}:5672
my_ip = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4']['address']
}}
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[api_database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_api
[barbican]
[cache]
backend=oslo_cache.memcache_pool
enabled=true
memcache_servers={{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
[cells]
[cinder]
os_region_name = RegionOne
[compute]
[conductor]
[console]
[consoleauth]
[cors]
[crypto]
[database]
connection = mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova
[devices]
[ephemeral_storage_encryption]
[filter_scheduler]
[glance]
api_servers = http://{{ vip }}:9292
[guestfs]
[healthcheck]
[hyperv]
[ironic]
[key_manager]
[keystone]
[keystone_authtoken]
auth_url = http://{{ vip }}:5000/v3
memcached_servers = {{ controller1_ip_man }}:11211,{{ controller2_ip_man
}}:11211,{{ controller3_ip_man }}:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = {{ nova_pw }}
[libvirt]
[matchmaker_redis]
[metrics]
[mks]
[neutron]
url = http://{{ vip }}:9696
auth_url = http://{{ vip }}:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = {{ neutron_pw }}
service_metadata_proxy = true
metadata_proxy_shared_secret = {{ metadata_secret }}
[notifications]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[pci]
[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://{{ vip }}:5000/v3
username = placement
password = {{ placement_pw }}
[quota]
[rdp]
[remote_debug]
[scheduler]
discover_hosts_in_cells_interval = 300
[serial_console]
[service_user]
[spice]
[upgrade_levels]
[vault]
[vendordata_dynamic_auth]
[vmware]
[vnc]
enabled = true
keymap=en-us
novncproxy_base_url = https://{{ vip }}:6080/vnc_auto.html
novncproxy_host = {{ hostvars[inventory_hostname]['ansible_ens3f1']['ipv4'][
'address'] }}
[workarounds]
[wsgi]
[xenserver]
[xvp]
[placement_database]
connection=mysql+pymysql://nova:{{ nova_dbpw }}@{{ vip }}/nova_placement

What is the problem? I have looked at openstack-nova-scheduler on the
controller node, but it is running well, with only a warning:

 nova-scheduler[19255]:
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:332:
NotSupportedWarning: Configuration option(s) ['use_tpool'] not supported

The result I want is for instances to be distributed across all compute nodes.
Thank you.

-- 

*Regards,*
*Zufar Dhiyaulhaq*
___
Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack
Post to : openstack@lists.openstack.org
Unsubscribe : http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack


Re: [Openstack] OpenStack neutron error

2018-08-02 Thread Zufar Dhiyaulhaq
HI Eugen,

Thanks for the solution. I think the docs were wrong; I have now fixed this issue.

thank you.

On Thu, Aug 2, 2018 at 7:49 PM, Eugen Block  wrote:

> Hi,
>
> the description in [1] sounds very similar to your problem and seems to be
> a bug in the docs. Can you check the ports you configured for keystone and
> which ports you have set in neutron configs?
>
> Regards,
> Eugen
>
> [1] https://ask.openstack.org/en/question/114642/neutron-configu
> ration-errot-failed-to-retrieve-extensions-list-from-network-api/
>
>
> Zitat von Zufar Dhiyaulhaq :
>
Hi, I am trying to install OpenStack Queens from scratch (manually) from the
>> OpenStack documentation, but I have a problem with neutron. When I try to
>> verify with `openstack network agent list`, there is an error: `HTTP exception
>> unknown error`
>>
>> When I check the logs on the controller
>> in `/var/log/neutron/neutron-server.log`, I see this error:
>>
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors [-] An
>> error occurred during processing the request: GET /v2.0/extensions
>> HTTP$
>> Accept: application/json
>> Accept-Encoding: gzip, deflate
>> Connection: keep-alive
>> Content-Type: text/plain
>> Host: controller:9696
>> User-Agent: python-neutronclient
>> X-Auth-Token: *: DiscoveryFailure: Could not determine a suitable
>> URL for the plugin
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> Traceback (most recent call last):
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/oslo_middleware/catch_errors.py",
>> lin$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> response = req.get_response(self.application)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/request.py", line 1316, in
>> send
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> application, catch_exc_info=False)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/request.py", line 1280, in
>> call$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> app_iter = application(self.environ, start_response)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/dec.py", line 131, in __call__
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> resp = self.call_func(req, *args, **self.kwargs)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/dec.py", line 196, in
>> call_func
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> return self.func(req, *args, **kwargs)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/__init_$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> response = self.process_request(req)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/__init_$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> resp = super(AuthProtocol, self).process_request(request)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/__init_$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> allow_expired=allow_expired)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> Traceback (most recent call last):
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/oslo_middleware/catch_errors.py",
>> lin$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> response = req.get_response(self.application)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/request.py", line 1316, in
>> send
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> application, catch_exc_info=False)
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
>> "/usr/lib/python2.7/dist-packages/webob/request.py", line 1280, in
>> call$
>> 2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
>> app_iter = ap

[Openstack] OpenStack neutron error

2018-08-02 Thread Zufar Dhiyaulhaq
86 ERROR oslo_middleware.catch_errors
allow_expired=allow_expired)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/__init_$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
data = self.fetch_token(token, **kwargs)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/__init_$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
allow_expired=allow_expired)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/_identi$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
auth_ref = self._request_strategy.verify_token(
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/_identi$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
strategy_class = self._get_strategy_class()
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystonemiddleware/auth_token/_identi$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors if
self._adapter.get_endpoint(version=klass.AUTH_VERSION):
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystoneauth1/adapter.py", line 223,
$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
return self.session.get_endpoint(auth or self.auth, **kwargs)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystoneauth1/session.py", line 942,
$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
return auth.get_endpoint(self, **kwargs)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystoneauth1/identity/base.py",
line$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
allow_version_hack=allow_version_hack, **kwargs)
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors   File
"/usr/lib/python2.7/dist-packages/keystoneauth1/identity/base.py",
line$
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
service_catalog = self.get_access(session).service_catalog
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
raise exceptions.DiscoveryFailure('Could not determine a suitable URL
'
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
DiscoveryFailure: Could not determine a suitable URL for the plugin
2018-08-02 19:21:37.511 2486 ERROR oslo_middleware.catch_errors
2018-08-02 19:21:37.512 2486 INFO neutron.wsgi [-] 10.100.0.70 "GET
/v2.0/extensions HTTP/1.1" status: 500  len: 404 time: 0.0035110


-- 

*Regards,*
*Zufar Dhiyaulhaq*
___
Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack
Post to : openstack@lists.openstack.org
Unsubscribe : http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack


[Openstack] [neutron] Cannot acces provider network (Openstack Packstack Opendaylight integration)

2018-01-25 Thread Zufar Dhiyaulhaq
Hi everyone, I am trying to integrate OpenStack, built with Packstack
(CentOS), with OpenDaylight.
this is my topology

Openstack Controller : 10.210.210.10 & 10.211.211.10
- eth1 : 10.211.211.10/24
- eth0 : 10.210.210.10/24

Openstack Compute : 10.210.210.20 & 10.211.211.20
- eth1 : 10.211.211.20/24
- eth0 : 10.210.210.20/24

OpenDayLight : 10.210.210.30
- eth1 : 10.210.210.30/24

Provider Network : 10.211.211.0/24
Tenant Network : 10.210.210.0/24

Openstack Version : Newton
OpenDayLight Version : Nitrogen SR1

this is my packstack configuration changes

CONFIG_HEAT_INSTALL=y
CONFIG_NEUTRON_FWAAS=y
CONFIG_NEUTRON_VPNAAS=y
CONFIG_LBAAS_INSTALL=y

CONFIG_CINDER_INSTALL=n
CONFIG_SWIFT_INSTALL=n
CONFIG_CEILOMETER_INSTALL=n
CONFIG_AODH_INSTALL=n
CONFIG_GNOCCHI_INSTALL=n
CONFIG_NAGIOS_INSTALL=n
CONFIG_PROVISION_DEMO=n

CONFIG_COMPUTE_HOSTS=10.X0.X0.20
CONFIG_USE_EPEL=y
CONFIG_KEYSTONE_ADMIN_PW=rahasia
CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan,gre,vlan,flat,local
CONFIG_NEUTRON_ML2_FLAT_NETWORKS=external
CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=external:br-ex
CONFIG_NEUTRON_OVS_BRIDGE_IFACES=br-ex:eth1
CONFIG_NEUTRON_OVS_BRIDGES_COMPUTE=br-ex

I try to follow this tutorial : http://docs.opendaylight.org/
en/stable-nitrogen/submodules/netvirt/docs/openstack-guide/
openstack-with-netvirt.html

The instance gets DHCP on the tenant network and can ping the tenant
router gateway IP, but I cannot ping anything on the provider network.

this is all of my configuration when integrating with opendaylight

## OPENDAYLIGHT ##

** Set ACL
mkdir -p etc/opendaylight/datastore/initial/config/
cp system/org/opendaylight/netvirt/aclservice-impl/0.5.1/
aclservice-impl-0.5.1-config.xml etc/opendaylight/datastore/
initial/config/netvirt-aclservice-config.xml
sed -i s/stateful/transparent/ etc/opendaylight/datastore/
initial/config/netvirt-aclservice-config.xml

export JAVA_HOME=/usr/java/jdk1.8.0_162/jre
./bin/karaf

** Install Feature
feature:install odl-dluxapps-nodes odl-dlux-core odl-dluxapps-topology
odl-dluxapps-applications odl-netvirt-openstack odl-netvirt-ui
odl-mdsal-apidocs odl-l2switch-all

## OPENSTACK CONTROLLER NODE ##

systemctl stop neutron-server
systemctl stop neutron-openvswitch-agent
systemctl disable neutron-openvswitch-agent
systemctl stop neutron-l3-agent
systemctl disable neutron-l3-agent

systemctl stop openvswitch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db
systemctl start openvswitch

ovs-vsctl set-manager tcp:10.210.210.30:6640
ovs-vsctl del-port br-int eth1
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth1
ovs-vsctl set-controller br-ex tcp:10.210.210.30:6653

ovs-vsctl set Open_vSwitch . other_config:local_ip=10.210.210.10
ovs-vsctl get Open_vSwitch . other_config

yum -y install python-networking-odl

crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2
mechanism_drivers opendaylight
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2
tenant_network_types vxlan

cat <> /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2_odl]
password = admin
username = admin
url = http://10.210.210.30:8080/controller/nb/v2/neutron
EOT

crudini --set /etc/neutron/plugins/neutron.conf DEFAULT service_plugins
odl-router
crudini --set /etc/neutron/plugins/dhcp_agent.ini OVS ovsdb_interface
vsctl

mysql -e "DROP DATABASE IF EXISTS neutron;"
mysql -e "CREATE DATABASE neutron CHARACTER SET utf8;"
neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file
/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head

systemctl start neutron-server
sudo ovs-vsctl set Open_vSwitch . other_config:provider_
mappings=external:br-ex

## OPENSTACK COMPUTE NODE ##

systemctl stop neutron-openvswitch-agent
systemctl disable neutron-openvswitch-agent
systemctl stop neutron-l3-agent
systemctl disable neutron-l3-agent

systemctl stop openvswitch
rm -rf /var/log/openvswitch/*
rm -rf /etc/openvswitch/conf.db

systemctl start openvswitch

ovs-vsctl set-manager tcp:10.210.210.30:6640
ovs-vsctl set-manager tcp:10.210.210.30:6640
ovs-vsctl del-port br-int eth1
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth1
ovs-vsctl set-controller br-ex tcp:10.210.210.30:6653

ovs-vsctl set Open_vSwitch . other_config:local_ip=10.210.210.20
ovs-vsctl get Open_vSwitch . other_config

yum -y install python-networking-odl

sudo ovs-vsctl set Open_vSwitch . other_config:provider_
mappings=external:br-ex

## REPORT ##


## OVS-VSCTL SHOW ##
### CONTROLLER ###
[root@pod21-controller ~]# ovs-vsctl show
525fbe7c-e60c-4135-b0a5-178d76c04529
Manager "ptcp:6640:127.0.0.1"
is_connected: true
Bridge br-tun
Controller "tcp:127.0.0.1:6633"
is_connected: true