Hi,
How do I find out which GC3Pie version is installed?
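
The only way I can think of is to ask the Python packaging machinery (assuming GC3Pie was installed as the `gc3pie` distribution, e.g. via pip), but I am not sure this is the recommended way:

    # query the installed distribution's version
    # (assumes a pip/setuptools-based install of the `gc3pie` package)
    import pkg_resources
    print(pkg_resources.get_distribution("gc3pie").version)

(`pip show gc3pie` from the shell should report the same information.)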

My config file has no entry for "time_cmd"; I attach the config file below.
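
If I understand the shellcmd backend correctly, the wrapper script runs each job under GNU time to collect accounting data, so the "exec: None: not found" error quoted below looks consistent with "time_cmd" resolving to None. A minimal sketch of what I would try adding to the [resource/sciencecloud] section, assuming GNU time really is installed at /usr/bin/time on the VM image:

    # path to GNU time on the execution VM
    # (assumption: /usr/bin/time exists on the CentOS image)
    time_cmd = /usr/bin/time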

Thanks
Jody

On Monday, November 21, 2016 at 11:51:42 AM UTC+1, Jody Weissmann wrote:
>
> Hi
>
> I have built a *SequentialTaskCollection* consisting of 6 Applications (a 
> generic sketch of this kind of workflow follows the quoted message below).
>
> When I run it on localhost it works fine, but on the ScienceCloud I get a 
> message like 
> /home/centos/gc3libs.L4Kvjx/.gc3pie_shellcmd/wrapper_script.sh: line 4: 
> exec: None: not found
> for every Application in the *SequentialTaskCollection*.
>
> A new instance is started on the ScienceCloud, and the various output 
> directories as well as the stdout and stderr files are created.
>
> The instance is created from a fresh CentOS 7.2 installation to which I 
> added some packages needed for my software to run (i.e. no GC3Libs or 
> similar is installed on it).
>
> Does anybody know how to solve this problem?
>
> cheers
>   Jody
>
>
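
For reference, a minimal sketch of the kind of workflow described above, using the standard gc3libs API; the class names and the echo command line are placeholders only, not the actual pipeline:

    from gc3libs import Application
    from gc3libs.workflow import SequentialTaskCollection

    class StepApp(Application):
        # one stage of the pipeline; the command line is a placeholder
        def __init__(self, step):
            Application.__init__(
                self,
                arguments=["/bin/echo", "running step", str(step)],
                inputs=[],
                outputs=[],
                output_dir=("step%d.d" % step),
                stdout="stdout.txt",
                stderr="stderr.txt")

    class SixStepPipeline(SequentialTaskCollection):
        # chain six StepApp instances; each starts after the previous one ends
        def __init__(self):
            SequentialTaskCollection.__init__(
                self, [StepApp(n) for n in range(6)])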

#
# This is an example configuration file for GC3Pie,
# meant to be used in the "GC3Pie for users" course.
#
# At the end of the course, it will contain real working definitions
# of the UZH computing resources so it can also be used as a basis
# for your own configuration file for production use.
#

# run tasks on the computer where GC3Pie commands are typed
[resource/localhost]
enabled = no
type = shellcmd
frontend = localhost
transport = local
max_cores_per_job = 2
max_memory_per_core = 2GiB
max_walltime = 12 hours
max_cores = 2
architecture = x86_64
auth = none
override = no


# run tasks on the Hydra large-memory cluster
[resource/hydra]
enabled = no
type = slurm
frontend = login.s3it.uzh.ch
transport = ssh
auth = ssh_user_account
max_walltime = 1 day
max_cores = 96
max_cores_per_job = 64
max_memory_per_core = 1 TiB
architecture = x86_64
prologue_content =
  module load cluster/largemem

[auth/ssh_user_account]
type=ssh
# UZH shortname of the account used to log in to the cluster front-end
username=jweiss


# run jobs on the UZH "ScienceCloud"
[resource/sciencecloud]
enabled=yes
type=openstack+shellcmd
auth=openstack

vm_pool_max_size = 6 
security_group_name=default
security_group_rules=
  tcp:22:22:0.0.0.0/0,
  icmp:-1:-1:0.0.0.0/0
network_ids=
  c86b320c-9542-4032-a951-c8a068894cc2

# definition of a single execution VM:
# - flavor (i.e., size) of the VM
instance_type=1cpu-4ram-hpc
# - image_id of the VM image to boot (alternatives kept commented out):
##   `Ubuntu Server 14.04.04 LTS (2016-05-19)`
##image_id=d2185996-00b4-4ce1-b291-147d2deebe94
##   CentOS image "gqhg_centos_dreed_test"
##image_id=9f52a5c8-f178-4d34-9dd2-a5dc59dbd73b
##   CentOS image "gqhg_centos7_3" (normal gqhg use)
##image_id=5d8192a6-3edf-4467-aa3f-833c347e97a7
#    CentOS image "qhg_analysis_2" (currently in use)
image_id=47c046b6-2b44-46dc-9dd2-eac42c5433be

max_cores_per_job = 16
# originally 4GiB
max_memory_per_core = 4GB
max_walltime = 90 days
max_cores = 97 
architecture = x86_64

# how to connect
vm_auth=ssh_user_centos
##keypair_name=dreed_gqhg_test
##public_key=~/.ssh/dreed_gqhg_test.pub
keypair_name=cloud_jw
public_key=~/.ssh/cloud_jw.pub

[auth/ssh_user_centos]
# default user on Centos VM images
type=ssh
username=centos

[auth/openstack]
# only need to set the `type` here; any other value will be taken from
# the `OS_*` environment variables
type = openstack
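# (typically the variables exported by the OpenStack RC file, e.g.
#  OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME/OS_PROJECT_NAME,
#  OS_REGION_NAME; exact names may differ per cloud and keystone version)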
