On Mon, Mar 11, 2013 at 11:21 AM, Lucas Meneghel Rodrigues
<[email protected]> wrote:
> On Thu, Mar 7, 2013 at 2:16 PM, Nishanth Aravamudan
> <[email protected]> wrote:
>> Use an RPC call, just like for specific hosts, for finding all possible
>> profiles. We let the user shoot themselves in the foot (that is, they
>> might select a profile that doesn't apply to that server), but that's
>> the nature of selecting by label -- unless the label is a platform, we
>> won't be able to do the lookup in the install server anyway.
>
> Fair enough. I found two issues while reviewing the patch; see below.
>
>> Update the create_job interface appropriately.
>>
>> Signed-off-by: Nishanth Aravamudan <[email protected]>
>>
>> ---
>> I have not tested the CLI submission of jobs yet. I *think* it should
>> work similarly to the regular host case, but I still need to look into it.
>>
>>
>> diff --git a/frontend/afe/rpc_interface.py b/frontend/afe/rpc_interface.py
>> index e316af9..8308e1e 100644
>> --- a/frontend/afe/rpc_interface.py
>> +++ b/frontend/afe/rpc_interface.py
>> @@ -264,6 +264,50 @@ def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
>>      return hosts.count()
>>
>>
>> +def get_profiles():
>> +    install_server = None
>> +    install_server_info = get_install_server_info()
>> +    install_server_type = install_server_info.get('type', None)
>> +    install_server_url = install_server_info.get('xmlrpc_url', None)
>> +
>> +    if install_server_type == 'cobbler' and install_server_url:
>> +        install_server = xmlrpclib.ServerProxy(install_server_url)
>> +
>> +    error_encountered = True
>> +    profile_dicts = []
>> +    if install_server is not None:
>> +        profiles = install_server.get_item_names('profile')
>> +
>> +        if len(profiles) < 1:
>> +            msg = 'No profiles defined on install server'
>> +            rpc_logger = logging.getLogger('rpc_logger')
>> +            rpc_logger.info(msg, host_dict['hostname'])
>
> ^ The way this is done, it'll cause a logging exception, since there's
> no %s or similar in msg.

Oh, I just realized there's no host_dict in that scope; I guess it was
left in by mistake. Just logging msg should be enough.
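
Something like this should be enough (untested, just dropping the stray
argument):

    if len(profiles) < 1:
        msg = 'No profiles defined on install server'
        rpc_logger = logging.getLogger('rpc_logger')
        rpc_logger.info(msg)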

>> +        else:
>> +            error_encountered = False
>> +            # the install server returns profiles unsorted
>> +            profiles.sort()
>> +            profile_dicts.append(dict(name="Do_not_install"))
>> +            for profile in profiles:
>> +                profile_dicts.append(dict(name=profile))
>> +
>> +    if error_encountered:
>> +        profile_dicts.append(dict(name="N/A"))
>> +
>> +    return rpc_utils.prepare_for_serialization(profile_dicts)
>> +
>> +
>> +def get_num_profiles():
>> +    """
>> +    Same parameters as get_profiles().
>> +
>> +    @returns The number of defined profiles.
>> +    """
>> +    profiles = install_server.get_item_names('profile')
>> +
>> +    # Add the do not install entry
>> +    return len(profiles) + 1
>> +
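
While at it: get_num_profiles() uses install_server directly, but that
name only seems to exist as a local inside get_profiles(), so unless it
is also defined at module level this will raise a NameError. Reusing
get_profiles() might be the simplest fix, roughly (untested sketch):

    def get_num_profiles():
        """
        @returns The number of defined profiles, including the
                 Do_not_install entry.
        """
        return len(get_profiles())
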
>>  # tests
>>
>>  def add_test(name, test_type, path, author=None, dependencies=None,
>> @@ -449,7 +493,7 @@ def create_parameterized_job(name, priority, test, parameters, kernel=None,
>>                               profiler_parameters=None,
>>                               use_container=False, profile_only=None,
>>                               upload_kernel_config=False, hosts=(),
>> -                             meta_hosts=(), one_time_hosts=(),
>> +                             meta_hosts=(), meta_host_profiles=(), one_time_hosts=(),
>>                               atomic_group_name=None, synch_count=None,
>>                               is_template=False, timeout=None,
>>                               max_runtime_hrs=None, run_verify=True,
>> @@ -532,10 +576,10 @@ def create_parameterized_job(name, priority, test, parameters, kernel=None,
>>
>>
>>  def create_job(name, priority, control_file, control_type,
>> -               hosts=(), profiles=(), meta_hosts=(), one_time_hosts=(),
>> -               atomic_group_name=None, synch_count=None, is_template=False,
>> -               timeout=None, max_runtime_hrs=None, run_verify=True,
>> -               email_list='', dependencies=(), reboot_before=None,
>> +               hosts=(), profiles=(), meta_hosts=(), meta_host_profiles=(),
>> +               one_time_hosts=(), atomic_group_name=None, synch_count=None,
>> +               is_template=False, timeout=None, max_runtime_hrs=None,
>> +               run_verify=True, email_list='', dependencies=(), reboot_before=None,
>>                 reboot_after=None, parse_failed_repair=None, hostless=False,
>>                 keyvals=None, drone_set=None):
>>      """\
>> diff --git a/frontend/afe/rpc_utils.py b/frontend/afe/rpc_utils.py
>> index ee3a0b0..730156e 100644
>> --- a/frontend/afe/rpc_utils.py
>> +++ b/frontend/afe/rpc_utils.py
>> @@ -458,10 +458,11 @@ def check_for_duplicate_hosts(host_objects):
>>
>>
>>  def create_new_job(owner, options, host_objects, profiles, metahost_objects,
>> -                   atomic_group=None):
>> +                   metahost_profiles, atomic_group=None):
>
> ^ The interface change here also needs to be applied to the other caller
> of this function, in scheduler/monitor_db.py. Otherwise we might end up
> with problems.
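
One way to keep the scheduler working while that call site gets updated
would be to give the new argument a default, e.g.:

    def create_new_job(owner, options, host_objects, profiles, metahost_objects,
                       metahost_profiles=(), atomic_group=None):

but explicitly updating the monitor_db.py caller is probably the cleaner fix.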



-- 
Lucas

_______________________________________________
Autotest-kernel mailing list
[email protected]
https://www.redhat.com/mailman/listinfo/autotest-kernel
