Hello,

We are testing this version of robinhood but it is not purging files for some 
reason.

Product:         robinhood
Version:         2.5.5-1
Build:           2015-06-10 15:05:13

Compilation switches:
    Temporary filesystem manager
    Address entries by FID
    MDT Changelogs supported

Lustre Version: 2.4
Database binding: MySQL


We just need a policy to delete files that are older than N days in a certain 
directory.  In my testing I am using a 1-day access-time condition on the path 
/lustre2/robinhood/.  I have created two files and set their timestamps to 
last year, but robinhood does not automatically purge them.

rbh-report shows the files and the fileclass correctly, but robinhood does not delete them.

rbh-report -D -C robinhoodtest
Using config file '/etc/robinhood.d/tmpfs/tmpfs_detailed.conf'.
filter class: robinhoodtest
    type,       size,       user,      group,          purge class,             
                        path
    file,          0,   l0363734,    rtstaff,        robinhoodtest,             
      /lustre2/robinhood/foo
    file,          0,   l0363734,    rtstaff,        robinhoodtest,             
      /lustre2/robinhood/qbz

Total: 2 entries, 0 bytes (0)

I am pasting our configuration in the hopes that someone can clarify what is 
going on here.  Also, how often does robinhood check to see what to purge? Are 
we supposed to run an initial filesystem scan first?
Here are the two files that I thought should be removed but are not.  They 
should be part of the fileclass robinhoodtest with the path /lustre2/robinhood.

[root@hous0162 robinhood]# stat foo qbz
  File: `foo'
  Size: 0               Blocks: 0          IO Block: 4194304 regular empty file
Device: 9ac965eh/162305630d     Inode: 144115205574492206  Links: 1
Access: (0644/-rw-r--r--)  Uid: ( 1378/l0363734)   Gid: (  512/ rtstaff)
Access: 2015-05-17 10:17:37.000000000 -0500
Modify: 2015-05-17 10:17:37.000000000 -0500
Change: 2016-05-17 10:17:37.000000000 -0500
  File: `qbz'
  Size: 0               Blocks: 0          IO Block: 4194304 regular empty file
Device: 9ac965eh/162305630d     Inode: 144115205574492209  Links: 1
Access: (0644/-rw-r--r--)  Uid: ( 1378/l0363734)   Gid: (  512/ rtstaff)
Access: 2015-05-17 10:17:37.000000000 -0500
Modify: 2015-05-17 10:17:37.000000000 -0500
Change: 2016-05-17 10:17:37.000000000 -0500



[root@hous0162 tmpfs]# ls -la /lustre2/robinhood/
total 8
drwxrwxrwt 2 l0363734 rtstaff 4096 May 17 10:12 .
drwxr-xr-x 9 root     root    4096 May 17 09:43 ..
-rw-r--r-- 1 l0363734 rtstaff    0 May 17  2015 foo
-rw-r--r-- 1 l0363734 rtstaff    0 May 17  2015 qbz




Below is the configuration file that we are using for testing, and we are 
starting the robinhood daemon like this:

/usr/sbin/robinhood --purge -d -f /etc/robinhood.d/tmpfs/tmpfs_detailed.conf -p 
/var/run/rbh.tmpfs_detailed




# Global settings: the filesystem robinhood manages and mount sanity checks.
General
{
    # filesystem to be monitored (all paths in policies below live under this root)
    fs_path = "/lustre2" ;

    # file for suspending all actions
    lock_file = "/var/locks/robinhood.lock" ;

    # check that objects are in the same device as 'fs_path',
    # so it will not traverse mount points
    stay_in_fs = TRUE ;

    # check that the filesystem is mounted
    check_mounted = TRUE ;
}

# Logging, reporting, and alerting configuration.
Log
{
    # Log verbosity level
    # Possible values are: CRIT, MAJOR, EVENT, VERB, DEBUG, FULL
    debug_level = EVENT ;

    # Log file
    log_file = "/var/log/robinhood/robinhood.log" ;

    # File for reporting purge events
    report_file = "/var/log/robinhood/robinhood_reports.log" ;

    # set alert_file, alert_mail or both depending on the alert method you wish
    alert_file = "/var/log/robinhood/robinhood_alerts.log" ;
    alert_mail = "[email protected], [email protected]" ;

    # Interval for dumping stats (to logfile)
    stats_interval = 20min ;

    # Alert batching (to send a digest instead of 1 alert per file)
    # 0: unlimited batch size, 1: no batching (1 alert per file),
    # N>1: batch N alerts per digest
    batch_alert_max = 5000 ;
    # Give the detail of entry attributes for each alert?
    alert_show_attrs = FALSE ;

    # whether the process name appears in the log line
    log_procname = TRUE;
    # whether the host name appears in the log line
    log_hostname = TRUE;
    # whether the module name appears in the log line
    log_module = TRUE;
}

# Database backend (MySQL) connection and accounting settings.
ListManager
{
    # Method for committing information to database.
    # Possible values are:
    # - "autocommit": weak transactions (more efficient, but database
    #   inconsistencies may occur)
    # - "transaction": manage operations in transactions (best consistency,
    #   lower performance)
    # - "periodic(<nb_transaction>)": periodically commit (every <n>
    #   transactions).
    commit_behavior = transaction ;

    # Minimum time (in seconds) to wait before trying to reestablish a lost
    # connection. Then this time is multiplied by 2 until reaching
    # connect_retry_interval_max.
    connect_retry_interval_min = 1 ;
    connect_retry_interval_max = 30 ;
    # disable the following options if you are not interested in
    # user or group stats (to speed up scan)
    user_acct  = enabled ;
    group_acct = enabled ;

    MySQL
    {
        server = "localhost" ;
        db     = "robinhood_lustre2" ;
        user   = "robinhood" ;
        password_file = "/etc/robinhood.d/.dbpassword" ;
        # port   = 3306 ;
        # socket = "/tmp/mysql.sock" ;
        # 'innodb' is the older flag; superseded by the 'engine' setting below.
        #innodb = enabled ;
        engine = InnoDB ;

    }
}


# Fileclass definitions. A fileclass has no effect by itself: it only applies
# when referenced by a policy (e.g. as 'target_fileclass' in purge_policies).
Filesets
{
    # entries anywhere under /lustre2/scratch
    FileClass   scratch
    {
        definition
        {
            tree == "/lustre2/scratch"
        }
    }

    # entries anywhere under /lustre2/robinhood
    FileClass   robinhoodtest
    {
        definition
        {
            tree == "/lustre2/robinhood"
        }
    }



}

# Purge policies define WHICH entries are eligible for purge and WHEN they
# become eligible.
# NOTE(review): in robinhood 2.x, purge policies are only APPLIED when a
# Purge_Trigger condition is met (see the Purge_Trigger blocks below).
# Matching a policy condition alone does not remove files: if OST/global
# usage stays below the trigger thresholds, nothing is purged even though
# rbh-report shows the entries as matching the fileclass.
purge_policies
{

    # Default purge policy.
    # This applies to files that don't match previous fileclasses, i.e:
    #   - don't match the 'ignore' block
    #   - don't match a fileclass of 'ignore_fileclass' directives
    #   - don't match any 'target_fileclass' of purge policies above
    #policy     default
    #{
    #    condition
    #    {
    #        last_access > 12h
    #    }
    #}
    # Purge scratch entries not accessed for 14 days.
    policy purge_scratch {
        target_fileclass = scratch;
        condition {last_access > 14d}
}
    # Purge test entries not accessed for 1 day.
    policy purge_mattbtest {
        target_fileclass = robinhoodtest;
        condition {last_access > 1d}
}
}

# Policy for removing empty directories.
rmdir_policy
{
    # Remove directories that have been empty for more than 14 days
    age_rm_empty_dirs   = 14d ;

    # don't remove dirs owned by 'root' or whose depth < 2
    ignore
    {
        owner == root
        or depth < 2
    }

    # Recursively remove directories matching this condition
    # (/!\ removes all the directory content)
    #recursive_rmdir
    #{
    #    path == /mnt/lustre/old/*
    #    and last_mod > 30d
    #}
}
# Policies for refreshing entry information stored in the database.
db_update_policy
{
    # possible policies for refreshing metadata and path in database:
    #   never: get the information once, then never refresh it
    #   always: always update entry info when processing it
    #   on_event: only update on related event
    #   periodic(interval): only update periodically
    #   on_event_periodic(min_interval,max_interval)= on_event + periodic

    # Updating of file metadata
    # NOTE(review): 'always' is the most DB-intensive choice for md_update
    # and fileclass_update -- confirm this is intended for production.
    md_update = always ;
    # Updating file path in database
    path_update = on_event_periodic(0,1h) ;
    # File classes matching
    fileclass_update = always ;
}

# Pipeline that processes entries coming from scans and changelogs.
EntryProcessor
{
    # Raise alerts for directories with too many entries
    Alert       Too_many_entries_in_directory
    {
        type == directory
        and
        dircount > 10000
    }

    # Raise alerts for large files
    Alert       Large_file
    {
        type == file
        and
        size > 500GB
    }

    # nbr of worker threads for processing pipeline tasks
    nb_threads = 4 ;

    # Max number of operations in the Entry Processor pipeline.
    # If the number of pending operations exceeds this limit,
    # info collectors are suspended until this count decreases
    max_pending_operations = 10000 ;

    # max batched DB operations (1=no batching)
    max_batch_size = 1000;

    # Optionally specify a maximum thread count for each stage of the pipeline:
    # <stagename>_threads_max = <n> (0: use default)
    # STAGE_GET_FID_threads_max = 4 ;
    # STAGE_GET_INFO_DB_threads_max     = 4 ;
    # STAGE_GET_INFO_FS_threads_max     = 4 ;
    # STAGE_REPORTING_threads_max       = 4 ;
    # STAGE_PRE_APPLY_threads_max       = 4 ;
    # Disable batching (max_batch_size=1) to allow parallelizing the
    # following step:
    # STAGE_DB_APPLY_threads_max        = 4 ;

    # if set to FALSE, classes will only be matched
    # at policy application time (not during a scan or reading changelog)
    match_classes = TRUE;

    # Faking mtime to an old time causes the file to be migrated
    # with top priority. Enabling this parameter detects this behavior
    # and disallows mtime < creation_time
    detect_fake_mtime = FALSE;
}

# Filesystem scan settings.
# NOTE(review): even with changelogs enabled, an initial full scan is needed
# so that entries created before the changelog reader was registered are
# known to robinhood -- verify one has completed before expecting purges.
FS_Scan
{
    # simple scan interval (fixed)
    scan_interval      =   2d ;

    # min/max for adaptive scan interval:
    # the more the filesystem is full, the more frequently it is scanned.
    #min_scan_interval      =   24h ;
    #max_scan_interval      =    7d ;

    # number of threads used for scanning the filesystem
    nb_threads_scan        =     4 ;

    # when a scan fails, this is the delay before retrying
    scan_retry_delay       =    1h ;

    # timeout for operations on the filesystem
    scan_op_timeout        =    1h ;
    # exit if operation timeout is reached?
    exit_on_timeout        =    TRUE ;
    # external command called on scan termination
    # special arguments can be specified: {cfg} = config file path,
    # {fspath} = path to managed filesystem
    #completion_command     =    "/path/to/my/script.sh -f {cfg} -p {fspath}" ;

    # Internal scheduler granularity (for detecting end of scan, hangs, ...)
    spooler_check_interval =  1min ;

    # Memory preallocation parameters
    nb_prealloc_tasks      =   256 ;

    Ignore
    {
        # ignore ".snapshot" and ".snapdir" directories (don't scan them)
        type == directory
        and
        ( name == ".snapdir" or name == ".snapshot" )
    }
}

# Lustre MDT changelog readers: keep the DB up to date between scans.
ChangeLog
{
    # 1 MDT block for each MDT :
    MDT
    {
        # name of the first MDT
        mdt_name  = "MDT0000" ;

        # id of the persistent changelog reader
        # as returned by "lctl changelog_register" command
        reader_id = "cl1" ;
    }

    # another MDT
  #  MDT
  #  {
  #      mdt_name  = "MDT0001" ;
  #      reader_id = "cl1" ;
  #  }

    # yet another MDT
  #  MDT
  #  {
  #      mdt_name  = "MDT0002" ;
  #      reader_id = "cl1" ;
  #  }
    # clear changelog every 1024 records:
    batch_ack_count = 1024 ;

    force_polling    = ON ;
    polling_interval = 1s ;
    queue_max_size   = 1000 ;
    queue_max_age    = 5s ;
    queue_check_interval = 1s ;
}

# Runtime parameters for purge execution (threading, queueing, actions).
Purge_Parameters
{
    # nbr of thread for performing purge operations
    nb_threads_purge      = 4 ;

    # Immediately after purging data, 'df'/'ost df' may not return an exact
    # value, especially if freeing disk space is asynchronous.
    # So, it is necessary to wait for a while after a purge before
    # issuing a new 'df'/'ost df' command.
    post_purge_df_latency = 1min ;

    # queue size (for leveraging purge threads load)
    purge_queue_size      = 4096 ;

    # Limit the size of database result sets (save memory)
    db_result_size_max    = 100000 ;
    # When applying purge policies, recheck entries
    # that previously matched ignored classes.
    # Enable it after changing fileclass definitions
    # or if entries move from one class to another.
    # This can significantly slow down policy application.
    recheck_ignored_classes = TRUE;

    # By default, purge action is removing the entry
    # from the filesystem. You can define an alternative
    # action by specifying a script command.
    # The following parameters can be specified:
    #    {path}: posix path to the entry
    #    {fid}: fid of the entry
    #    {fsname}: Lustre fsname
    #purge_command = "/usr/bin/move_to_trash.sh {path}";

}

# Start a purge when any single OST exceeds 85% usage; purge until it drops
# back to 80%.
# NOTE(review): purge policies only run when a trigger like this fires. If
# usage never reaches high_threshold_pct, files matching a purge policy
# condition are NOT removed -- likely the behavior reported in this thread.
Purge_Trigger
{
    trigger_on         = OST_usage ;
    high_threshold_pct = 85% ;
    low_threshold_pct  = 80% ;
    check_interval     = 5min ;
}

# Start a purge when overall filesystem usage exceeds 90%; purge until it
# drops back to 85%.
Purge_Trigger
{
    trigger_on         = global_usage ;
    high_threshold_pct = 90% ;
    low_threshold_pct  = 85% ;
    check_interval     = 5min ;
    # raise an alert when the high threshold is reached
    alert_high         = TRUE ;
    # raise an alert if not enough data can be purged
    # to reach the low threshold
    alert_low          = TRUE ;
}

# Per-user volume trigger for the listed users.
# NOTE(review): 'charlie' and 'foo' look like template example usernames --
# verify they are real users or remove this block.
Purge_Trigger
{
    trigger_on         = user_usage(charlie,foo) ;
    high_threshold_vol = 1TB ;
    low_threshold_vol  = 950GB ;
    check_interval     = 12h ;
    # send an alert when the quota is reached
    alert_high         = TRUE ;
}


# Runtime parameters for empty-directory removal (see rmdir_policy above).
Rmdir_Parameters
{
    # Interval for performing empty directory removal
    runtime_interval = 12h ;

    # Number of threads for performing rmdir operations
    nb_threads_rmdir = 4 ;

    # Queue size (for leveraging rmdir threads load)
    rmdir_queue_size = 256 ;
}


------------------------------------------------------------------------------
Mobile security can be enabling, not merely restricting. Employees who
bring their own devices (BYOD) to work are irked by the imposition of MDM
restrictions. Mobile Device Manager Plus allows you to control only the
apps on BYO-devices by containerizing them, leaving personal data untouched!
https://ad.doubleclick.net/ddm/clk/304595813;131938128;j
_______________________________________________
robinhood-support mailing list
[email protected]
https://lists.sourceforge.net/lists/listinfo/robinhood-support

Reply via email to