User Tools

Site Tools


neuroimagen:neuro4.pm

Differences

This shows you the differences between two versions of the page.

Link to this comparison view

Both sides previous revision Previous revision
Next revision
Previous revision
Last revision Both sides next revision
neuroimagen:neuro4.pm [2020/08/04 10:58]
127.0.0.1 external edit
neuroimagen:neuro4.pm [2022/05/17 07:49]
osotolongo [NEURO4]
Line 3: Line 3:
 Esta pagina es solo de referencia de los modulos Perl que dan soporte al pipeline de neuroimagen. Esta pagina es solo de referencia de los modulos Perl que dan soporte al pipeline de neuroimagen.
  
-===== NEURO4.pm =====+<markdown> 
 +NEURO4
  
-Subrutinas: +This is set of functions for helping in the pipeline
-  * sub print_help -> Imprime la ayuda de un script, tomandola del directorio //docs// +
-  * sub escape_name -> Escapa caracteres molestos de los nombres de directorio +
-  * sub trim -> Elimina espacios, antes y despues de un string +
-  * sub check_or_make -> crea un directorio si no existe +
-  * sub load_project -> lee las variables de un proyecto dentro de un hash +
-  * sub check_subj -> lee la estructura BIDS de un sujeto dentro de un hash +
-  * sub check_fs_subj -> comprueba que un sujeto se haya procesado en FS +
-  * sub get_lut -> +
-  * sub run_dckey -> corre //dckey// sobre un DICOM y saca el valor de un tag +
-  * sub dclokey -> devuelve el valor de un tag oculto en un DICOM +
-  * sub centiloid_fbb -> devuelve el valor de Centiloide para un valor de SUVR en FBB +
-  * sub populate -> Lee un archivo de CSV (o texto plano) y devuelve un hash con los valores, segun el patron que se suministra +
-  * sub get_subjects -> Devuelve un array con los sujetos de un proyecto contenidos en un archivo de DB del proyecto +
-  * sub get_list -> Devuelve un array con los sujetos de un proyecto contenidos, ignora el formato posterior los primero 4 numeros +
-  * sub get_pair -> devuelve un hash que contiene una DB de proyecto +
-  * sub shit_done -> Envia un email con un archivo comprimido de attachment +
-  * sub cut_shit -> Tomando como argumentos la DB de un proyecto y una lista simple de sujetos, devuelve un array con los sujetos de la lista incluidos en el proyecto.+
  
 +- print\_help
  
-<code perl NEURO4.pm> +    just print the help
-#!/usr/bin/perl+
  
-use strict; use warnings; +    this function reads the path of a TXT file and prints it to STDOUT 
-package NEURO4; +
-require Exporter; +
-use File::Slurp qw(read_file); +
-use File::Find::Rule; +
-use Mail::Sender; +
-use MIME::Lite; +
-use File::Basename qw(basename);+
  
-our @ISA                = qw(Exporter); +    usage:
-our @EXPORT             = qw(print_help load_project cut_shit); +
-our @EXPORT_OK  = qw(print_help escape_name trim check_or_make load_project populate get_subjects check_subj check_fs_subj get_list shit_donei get_pair cut_shit); +
-our %EXPORT_TAGS        = (all => [qw(print_help escape_name trim check_or_make load_project check_subj check_fs_subj get_lut run_dckey dclokey centiloid_fbb populate get_subjects get_list shit_done get_pair cut_shit)], +
-                                        usual => [qw(print_help load_project check_or_make cut_shit)],); +
-our $VERSION    = 1.0;+
  
-sub print_help +            print_help(help_file);
-# just print the help +
-        my $hlp = shift; +
-        open HELP, "<$hlp"; +
-        while(<HELP>)+
-                print; +
-        } +
-        close HELP; +
-        return; +
-}+
  
-sub trim { +- escape\_name
-        my $string = shift; +
-        $string =~ s/^\s+//;  #trim leading space +
-        $string =~ s/\s+$//;  #trim trailing space +
-        return $string; +
-}+
  
-sub check_or_make { +    This function takes a string and removes some special characters 
-        my $place = shift; +    in order to escape directory names with a lot of strange symbols.
-        # I must check if directory exist, else I create it. +
-        if(opendir(TEST, $place)){ +
-        closedir TEST; +
-        }else{ +
-                mkdir $place; +
-        } +
-}+
  
-sub load_project { +    It returns the escaped string
-        my $study = shift; +
-        my %stdenv = map {/(.*) = (.*)/; $1=>$2 } read_file $ENV{HOME}."/.config/neuro/".$study.".cfg"; +
-        return %stdenv; +
-}+
  
-sub check_subj { +    usage:
-        my $proj_path = shift; +
-        my $subj = shift; +
-        my %mri = ('T1w' => 0, 'T2w' => 0, 'dwi' => 0, 'dwi_sbref' => 0); +
-        my $subj_dir = $proj_path.'/bids/sub-'.$subj.'/anat'; +
-        if( -e $subj_dir && -d $subj_dir){ +
-                my @t1 = find(file => 'name' => "sub-$subj*_T1w.nii.gz", in =>  $subj_dir); +
-                if (-e $t1[0] && -f $t1[0]){ +
-                        $mri{'T1w'} = $t1[0]; +
-                } +
-                my @t2 = find(file => 'name' => "sub-$subj*_T2w.nii.gz", in =>  $subj_dir); +
-                if (-e $t2[0] && -f $t2[0]){ +
-                        $mri{'T2w'} = $t2[0]; +
-                } +
-        } +
-        $subj_dir = $proj_path.'/bids/sub-'.$subj.'/dwi'; +
-        if( -e $subj_dir && -d $subj_dir){ +
-                my @dwi_sbref = find(file => 'name' => "sub-$subj*_sbref_dwi.nii.gz", in =>  $subj_dir); +
-                if (-e $dwi_sbref[0] && -f $dwi_sbref[0]){ +
-                        $mri{'dwi_sbref'} = $dwi_sbref[0]; +
-                } +
-                my @dwi = find(file => 'name' => "sub-$subj*_dwi.bval", in =>  $subj_dir); +
-                if (-e $dwi[0] && -f $dwi[0]){ +
-                        ($mri{'dwi'} = $dwi[0]) =~ s/bval$/nii\.gz/; +
-                } +
-        } +
-        return %mri; +
-}+
  
-sub check_fs_subj { +            escape_name(string);
-        my $subj = shift; +
-        my $subj_dir = qx/echo \$SUBJECTS_DIR/; +
-        chomp($subj_dir); +
-        my $place = $subj_dir."/".$subj; +
-        my $ok = 0; +
-        # I must check if directory exist. +
-        if( -e $place && -d $place){$ok = 1;} +
-        return $ok; +
-}+
  
-sub get_lut { +trim
-        my $ifile = shift; +
-        my $patt = '\s*(\d{1,8})\s*([A-Z,a-z,\-,\_,\.,0-9]*)\s*.*'; +
-        my %aseg_data = map {/$patt/; $1=>$2} grep {/^$patt/} read_file $ifile; +
-        return %aseg_data; +
-}+
  
-sub run_dckey{ +    This function takes a string and removes any spaces before and after the text 
-        my @props = @_; +
-        my $order = "dckey -k $props[1] $props[0] 2\>\&1"; +
-        print "$order\n"; +
-        my $dckey = qx/$order/; +
-        chomp($dckey); +
-        $dckey =~ s/\s*//g; +
-        return $dckey; +
-}+
  
-sub dclokey{ +    usage:
-        my @props = @_; +
-        my $order = "dcdump $props[0] 2\>\&1 \| grep \"".$props[1]."\""; +
-        print "$order\n"; +
-        my $line = qx/$order/; +
-        (my $dckey) = $line =~ /.*VR=<\w{2}>\s*VL=<0x\d{3,4}[a-z]*>\s*<(.*)\s*>/; +
-        if($dckey){ +
-                $dckey =~ s/\s*//g; +
-        } +
-        return $dckey; +
-}+
  
-sub centiloid_fbb { +            trim(string);
-    my $suvr = shift; +
-    return 153.4*$suvr-154.9; +
-}+
  
-sub populate { +- check\_or\_make
-        my $patt = shift; +
-        my $csv = shift; +
-        my %pdata = map { /^$patt$/; $1 => $2} grep {/^$patt$/} read_file $csv; +
-        return %pdata; +
-}+
  
-sub get_subjects { +    This is mostly helpless, just takes a path
-        my $db = shift; +    checks if it exists and creates it otherwise 
-        my @slist = map {/^(\d{4});.*$/; $1} grep { /^\d{4}/ } read_file($dbchomp => 1); +
-        return @slist; +
-}+
  
-sub get_list { +    usage:
-        my $ifile = shift; +
-        my @slist = map {/^(\d{4}).*$/; $1} grep { /^\d{4}/ }read_file($ifile, chomp => 1); +
-        return @slist; +
-}+
  
-sub get_pair { +            check_or_make(path);
-        my $ifile = shift; +
-        my %pet_data = map {/(.*);(.*)/; $1=>$2} read_file $ifile; +
-        return %pet_data; +
-}+
  
-sub shit_done { +inplace
-        my @adv = @_; +
-        my $msg = MIME::Lite->new( +
-                From    => "$ENV{'USER'}\@detritus.fundacioace.com", +
-                To      => "$ENV{'USER'}\@detritus.fundacioace.com", +
-                Subject => 'Script terminado', +
-                Type    => 'multipart/mixed', +
-        );+
  
-        $msg->attach( +    This function takes a path and a file name or two paths 
-                Type     => 'TEXT', +    and returns a string with a single path as result of 
-                Data     => "$adv[0] ha terminado en el estudio $adv[1].\n\n", +    the concatenation of the first one plus the second one
-        );+
  
-        $msg->attach( +    usage:
-                Type     => 'application/gzip', +
-                Path     => $adv[2], +
-                Filename => basename($adv[2]), +
-        );+
  
-        $msg->send; +            inplace(path, filename);
-}+
  
-sub cut_shit { +load\_project
-        my $db = shift; +
-        my $cfile = shift; +
-        my @plist = get_subjects($db); +
-        my @oklist; +
-        if (-e $cfile){ +
-                my @cuts = get_list($cfile); +
-                foreach my $cut (sort @cuts){ +
-                        if(grep {/$cut/} @plist){ +
-                                push @oklist, $cut; +
-                        } +
-                } +
-        }else{ +
-                @oklist = @plist; +
-        } +
-return @oklist; +
-}+
  
-sub escape_name { +    This function takes the name of a project, reads the configuration file 
-# in order to escape directory names with lot of strange symbols +    that is located at ~/.config/neuro and returns every project configuration 
-        my $name = shift; +    stored as a hash that can be used in the scripts 
-        $name=~s//\\\ /g; +
-        $name=~s/\`/\\\`/g; +
-        $name=~s/\(/\\\(/g; +
-        $name=~s/\)/\\\)/g; +
-        return $name; +
-}+
  
-</code>+    usage:
  
-===== FSMetrics =====+            load_project(project_name);
  
-  * fs_file_metrics -> devuelve las ordenes que se ejecutaran en la extraccion de las metricas de FS+check\_subj
  
-<code perl FSMetrics.pm> +    Here the fun begins
-#!/usr/bin/perl+
  
-use strict; use warnings; +    This function takes as input the name of the project and the subject ID 
-package FSMetrics; +    Then it seeks along the BIDS structure for this subject and returns a hash, 
-require Exporter; +    containing the MRI proper images. 
-our @ISA                = qw(Exporter); + 
-our @EXPORT             = qw(fs_file_metrics); +    It should return a single value, except for the T1w images, where an array 
-our @EXPORT_OK  = qw(fs_file_metrics); +    is returned. This was thought this way because mostly a single session is done. 
-our %EXPORT_TAGS        = (all => [qw(fs_file_metrics)]usual => [qw(fs_file_metrics)],); +    However, the ability to detect more than one MRI was introduced to allow the 
-our $VERSION    = 0.1 +    movement correction when ADNI images are analyzed 
-; + 
-sub fs_file_metrics { +    So, for T1w images the returned hash should be asked as 
-my %stats = ('wmparc_stats=> { + 
-                'order' => "asegstats2table --subjects <list> --meas volume --skip --statsfile wmparc.stats --all-segs --tablefile <fs_output>/wmparc_stats.txt", +            @{$nifti{'T1w'}} 
-                'active' => 1, + 
-        }+    but for other kind of image it should asked as 
-        'aseg_stats' => { + 
-                'order' => "asegstats2table --subjects <list> --meas volume --skip --tablefile <fs_output>/aseg_stats.txt", +            $nifti{'T2w'
-                'active' => 1, + 
-        }+    usage: 
-        'aparc_volume_lh' => { + 
-                'order' => "aparcstats2table --subjects <list> --hemi lh --meas volume --skip --tablefile <fs_output>/aparc_volume_lh.txt", +            check_subj(project_path, bids_id); 
-                'active' => 1, + 
-        }+check\_pet 
-        'aparc_thickness_lh' => { + 
-                'order' => "aparcstats2table --subjects <list> --hemi lh --meas thickness --skip --tablefile <fs_output>/aparc_thickness_lh.txt", +    This function takes as input the name of the project and the subject ID 
-                'active' => 1, +    Then it seeks along the BIDS structure for this subject and returns a hash, 
-        }+    containing the PET proper images. 
-        'aparc_area_lh' => { + 
-                'order' => "aparcstats2table --subjects <list> --hemi lh --meas area --skip --tablefile <fs_output>/aparc_area_lh.txt", +    If also a tracer is given as input, then the returned hash contains the PET-tau 
-                'active' => 1+    associated to this tracer. This was introduced as part of a project were the subjects 
-        }, +    were analyzed with different radiotracers. 
-        'aparc_meancurv_lh' => { + 
-                'order' => "aparcstats2table --subjects <list> --hemi lh --meas meancurv --skip --tablefile <fs_output>/aparc_meancurv_lh.txt"+    If no tracer is given, it will seek for the FBB PETs. Those PETs are stored as 
-        }+ 
-        'aparc_volume_rh' => { +            single: 4x5min 
-                'order' => "aparcstats2table --subjects <list> --hemi rh --meas volume --skip --tablefile <fs_output>/aparc_volume_rh.txt"+            combined: 20min 
-                'active' => 1+ 
-        }+    usage: 
-        'aparc_thickness_rh' => { + 
-                'order' => "aparcstats2table --subjects <list--hemi rh --meas thickness --skip --tablefile <fs_output>/aparc_thickness_rh.txt", +            check_pet(project_path, bids_id, $optional_radiotracer); 
-                'active' => 1, + 
-        }, +check\_fs\_subj 
-        'aparc_area_rh' => { + 
-                'order' => "aparcstats2table --subjects <list> --hemi rh --meas area --skip --tablefile <fs_output>/aparc_area_rh.txt", +    This function checks if the Freesurfer directory of a given subjects exists 
-                'active' => 1, + 
-        }, +    usage: 
-        'aparc_meancurv_rh=> { + 
-                'order' => "aparcstats2table --subjects <list> --hemi rh --meas meancurv --skip --tablefile <fs_output>/aparc_meancurv_rh.txt", +            check_fs_subj(freesurfer_id) 
-        }+ 
-        'lh.a2009s.volume' => { +get\_lut 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc aparc.a2009s --meas volume --skip -t <fs_output>/lh.a2009s.volume.txt", + 
-        }+    I really don't even remember what this shit does 
-        'lh.a2009s.thickness' => { + 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc aparc.a2009s --meas thickness --skip -t <fs_output>/lh.a2009s.thickness.txt"+run\_dckey 
-        }+ 
-        'lh.a2009s.area' ={ +    Get the content of a public tag from a DICOM file
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc aparc.a2009s --meas area --skip -t <fs_output>/lh.a2009s.area.txt"+ 
-        }+    usage: 
-        'lh.a2009s.meancurv' => { + 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc aparc.a2009s --meas meancurv --skip -t <fs_output>/lh.a2009s.meancurv.txt", +            run_dckey(key, dicom) 
-        }, + 
-        'rh.a2009s.volume' => { +dclokey 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc aparc.a2009s --meas volume --skip -t <fs_output>/rh.a2009s.volume.txt", + 
-        }, +    Get the content of a private tag from a DICOM file
-        'rh.a2009s.thickness' => { + 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc aparc.a2009s --meas thickness --skip -t <fs_output>/rh.a2009s.thickness.txt"+    usage: 
-        }+ 
-        'rh.a2009s.area' => { +            dclokey(key, dicom) 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc aparc.a2009s --meas area --skip -t <fs_output>/rh.a2009s.area.txt"+ 
-        }, +centiloid\_fbb 
-        'rh.a2009s.meancurv' => { + 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc aparc.a2009s --meas meancurv --skip -t <fs_output>/rh.a2009s.meancurv.txt"+    Returns the proper centiloid value for a given SUVR
-        }+    Only valid for FBB. 
-        'lh.BA.volume' => { + 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc BA --meas volume --skip -t <fs_output>/lh.BA.volume.txt", +    usage: 
-        }+ 
-        'lh.BA.thickness' => { +            centiloid_fbb(suvr); 
-                'order=> "aparcstats2table --hemi lh --subjects <list> --parc BA --meas thickness --skip -t <fs_output>/lh.BA.thickness.txt"+ 
-        }+- populate 
-        'lh.BA.area' => { + 
-                'order' ="aparcstats2table --hemi lh --subjects <list> --parc BA --meas area --skip -t <fs_output>/lh.BA.area.txt"+    Takes a pattern and a filename and stores the content of the file 
-        }+    into a HASH according to the given pattern 
-        'lh.BA.meancurv' => { + 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc BA --meas meancurv --skip -t <fs_output>/lh.BA.meancurv.txt"+    usage: 
-        }+ 
-        'rh.BA.volume' => { +            populate(pattern, filename); 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc BA --meas volume --skip -t <fs_output>/rh.BA.volume.txt", + 
-        }, +get\_subjects 
-        'rh.BA.thickness' => { + 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc BA --meas thickness --skip -t <fs_output>/rh.BA.thickness.txt"+    Parse a project database taking only the subjects and storing them into an array
-        }+    The databse is expected to be build as
-       'lh.BA.area' => { + 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc BA --meas area --skip -t <fs_output>/lh.BA.area.txt"+            0000;name 
-        }, + 
-        'lh.BA.meancurv=> { +    usage: 
-                'order' => "aparcstats2table --hemi lh --subjects <list> --parc BA --meas meancurv --skip -t <fs_output>/lh.BA.meancurv.txt"+ 
-        }+            get_subjects(filename); 
-        'rh.BA.volume' => { + 
-                'order' ="aparcstats2table --hemi rh --subjects <list> --parc BA --meas volume --skip -t <fs_output>/rh.BA.volume.txt", +get\_list 
-        }, + 
-        'rh.BA.thickness=> { +    Parse a project database taking only the subjects and storing them into an array. 
-                'order=> "aparcstats2table --hemi rh --subjects <list> --parc BA --meas thickness --skip -t <fs_output>/rh.BA.thickness.txt", +    The databse is expected to be build with a four digits number at the beginning of 
-        }+    line. Is similar to get\_subjects() function but less restrictive 
-        'rh.BA.area' => { + 
-                'order' => "aparcstats2table --hemi rh --subjects <list> --parc BA --meas area --skip -t <fs_output>/rh.BA.area.txt", +    usage: 
-        }+ 
-        'rh.BA.meancurv' ={ +            get_list(filename); 
-                'order' => "aparcstats2table --hemi rh --subjects <list--parc BA --meas meancurv --skip -t <fs_output>/rh.BA.meancurv.txt", + 
-        }, +get\_pair 
-); + 
-return %stats; +    A single file is loaded as input and parse into a HASH. 
-}+    The file should be written in the format: 
 + 
 +            key;value 
 + 
 +    usage: 
 + 
 +            get_pair(filename); 
 + 
 +shit\_done 
 + 
 +    this function is intended to be used  after a script ends 
 +    and then an email is send to the user 
 +    with the name of the script, the name of the project and the results attached 
 + 
 +    usage: 
 + 
 +            shit_done(script_name, project_name, attached_file) 
 + 
 +cut\_shit 
 + 
 +    This function takes a project database and a file with a list, then 
 +    returns the elements that are common to both. 
 +    It is intended to be used to restrict the scripts action 
 +    over a few elements. It returns a single array. 
 + 
 +    If it is correctly used, first the db is identified with 
 +    _load\_project()_ function and then passed through this function 
 +    to get the array of subjects to be analyzed. If the file with 
 +    the cutting list does not exist, an array with all the subjects 
 +    is returned. 
 + 
 +    usage: 
 + 
 +            cut_shit(db, list); 
 + 
 +getLoggingTime 
 + 
 +    This function returns a timestamp based string intended to be used 
 +    to make unique filenames 
 + 
 +    Stolen from Stackoverflow 
 + 
 +    usage: 
 + 
 +            getLoggingTime(); 
 +                                                                                                      
 +</markdown> 
 + 
 +<markdown> 
 + 
 +# FSMetrics 
 + 
 +Bunch of helpers for storing ROI structure and relative data 
 + 
 +- fs\_file\_metrics 
 + 
 +    This function does not read any input. Its sole purpose is to 
 +    returns a HASH containing the templates of order for converting Freesurfer (FS) 
 +    results into tables. 
 + 
 +    Any hash element is composed of the template ('order'), a boolean ('active') to decide 
 +    if the FS stats will be processed and the name of the FS stat file ('file'). 
 +    The order template has two wildcards (<list> and <fs\_output>) that should be 
 +    parsed and changed by the FS subject id and the output directory where the 
 +    data tables will be stored for each subject 
 + 
 +    The function could be invoked as
 + 
 +            my %stats = fs_file_metrics(); 
 + 
 +    in any script where this information would be needed. 
 + 
 +    The boolean element could be used to choose the stats that should 
 +    be processed and can be added or modified even at run time if needed. The 
 +    stored booleans only provide a decent default 
 + 
 +fs\_fbb\_rois 
 + 
 +    _deprecated_ 
 + 
 +    This function exports a HASH that contains the Freesurfer composition of the 
 +    usual segmentations used for building the SUVR ROI 
 + 
 +tau\_rois 
 + 
 +    This function takes a string as input and returns an ARRAY containing 
 +    the list of ROIs that should be build and where the SUVR should be calculated 
 + 
 +    It is intended to be used for PET-Tau but could be used anywhere 
 + 
 +    By default a list of Braak areas is returned. If the input string is **alt** 
 +    a grouping of those Braak areas is returned. If the purpose is to build 
 +    a meta\_temporal ROI the string **meta** should be passed as input 
 + 
 +    The main idea here is to read the corresponding file for each ROI, stored at 
 +    `PIPEDIR/lib/tau/` and build each ROI with the FS LUTs store there 
 + 
 +pet\_rois 
 + 
 +    This function takes a string as input and returns an ARRAY containing 
 +    the list of ROIs that should be build and where the SUVR should be calculated 
 + 
 +    Input values are **parietal**, **frontal**, **pieces** or **global** (default) 
 + 
 +    The main idea here is to read the corresponding file for each ROI, stored at 
 +    `PIPEDIR/lib/pet/` and build each ROI with the FS LUTs stored there 
 +     
 +</markdown
 + 
 +<markdown> 
 +# SLURM 
 + 
 +This module contains just a function to send the jobs to SLURM 
 +from the Perl scripts 
 + 
 +send2slurm 
 + 
 +    The function takes a HASH as input where all the information 
 +    relative to the job should be stored. No data is mandatory 
 +    inside the input HASH, since the minimal values are automagicaly 
 +    asigned by default as a constructor (no really, but anyway). 
 + 
 +    Take into account that this subroutine only pass the parameters 
 +    to SLURM. So, the logic behind your actions should correspond 
 +    to what you want to do in any case, exactly as if you were 
 +    writing sbatch scripts
 + 
 +    The managed options for SLURM jobs are: 
 + 
 +            filename: File where the sbatch script will be stored 
 +            job_name: Job name for SLURM (-J) 
 +            cpus: Number of CPUs to be used by each job (-c) 
 +            mem_per_cpu: Amount of memory to be used for each CPU (--mem-per-cpu) 
 +            time: Maximum time that the job will be allowed to run (--time) 
 +            output: File where the sbatch script output will be stored (-o) 
 +            partition: SLURM partition to be used (-p) 
 +            gres: GPUs to be used (--gres) 
 +            command: Command to be executed at sbatch script 
 +            mailtype: Type of warning to be emailed (--mail-type) 
 +            dependency: Full dependency string to be used at sbatch execution (--dependency), see more below 
 + 
 +    The function returns the jobid of the queued job, so it can be used to 
 +    build complex workflows. 
 + 
 +    usage: my $job\_id = send2slurm(\\%job\_properties); 
 + 
 +    Warning email: By default, if an empty HASH is passed to the function, 
 +    a no command sbatch script is launched 
 +    with _--mail-type=END_ option. The intention is that this could be used to 
 +    warn at the end of any launched swarm. Also, by changing **mailtype** but 
 +    ommiting the **command** value you can force the function to execute 
 +    an empty sbatch job with whatever warning behavior that you choose. 
 + 
 +    Dependencies: If dependencies are going to be used, you need to pass to 
 +    the function the full string that SLURM expects. That is, you can pass something 
 +    like _singleton_ or _after:000000_ or even _afterok:000000,000001,000002_. 
 +    This last can be built, for example, by storing every previous jobid into an ARRAY 
 +    and passing them as: 
 + 
 +            ... 
 +                    my $jobid = send2slurm(\%previous); 
 +                    push @jobids, $jobid; 
 +            ... 
 +            $task{'dependency'} = 'afterok:'.join(',',@jobids); 
 +            ..
 +            send2slurm(\%task); 
 + 
 +    Of course, if dependencies are not going to be used, the 
 +    **dependency** option could be safely ignored. But notice that, if you are 
 +    reusing a HASH then this key should be deleted from it
 +</markdown> 
 + 
 +<markdown> 
 +# XNATACE 
 + 
 +xconf 
 + 
 +    Publish path of xnatapic configuration file 
 + 
 +    usage: 
 + 
 +            $path = xconf(); 
 + 
 +xget\_conf 
 + 
 +    Get the XNAT connection data into a HASH 
 + 
 +    usage: 
 + 
 +            %xnat_data = xget_conf() 
 + 
 +xget\_pet 
 + 
 +    Get the XNAT PET experiment ID 
 + 
 +    usage: 
 + 
 +            xget_pet(host, jsession, project, subject) 
 + 
 +xget\_mri 
 + 
 +    Get the XNAT MRI experiment ID 
 + 
 +    usage: 
 + 
 +            xget_mri(host, jsession, project, subject) 
 + 
 +xget\_fs\_data 
 +Get the full Freesurfer directory in a tar.gz file 
 + 
 +    usage: 
 + 
 +            xget_fs_data(host, jsession, project, experiment, output_path) 
 + 
 +- xget\_fs\_stats 
 + 
 +    Get a single stats file from Freesurfer segmentation 
 + 
 +    usage: 
 + 
 +            xget_fs_stats(host, jsession, experiment, stats_file, output_file) 
 + 
 +- xget\_session 
 + 
 +    Create a new JSESSIONID on XNAT. Return the connection data 
 +    for the server AND the ID of the created session 
 + 
 +    usage: 
 + 
 +            xget_session(); 
 + 
 +xput\_report 
 + 
 +    Upload a pdf report to XNAT 
 + 
 +    usage: 
 + 
 +            xput_report(host, jsession, subject, experiment, pdf_file); 
 + 
 +xput\_rvr 
 + 
 +    Upload a JSON file with VR data 
 + 
 +    usage: 
 + 
 +            xput_rvr(host, jsession, experiment, json_file); 
 + 
 +xget\_rvr 
 + 
 +    Get VR results into a HASH. Output is a hash with filenames and URI of each element stored at RVR 
 + 
 +    usage: 
 + 
 +            xget_rvr(host, jsession, project, experiment); 
 + 
 +- xget\_rvr\_data 
 + 
 +    Get RVR JSON data into a hash 
 + 
 +    usage: 
 + 
 +            xget_rvr_data(host, jsession, URI); 
 + 
 +xget\_subjects 
 + 
 +    Get the list of subjects of a project into a HASH
 +    The input HASH, _%sbjs_, is built as _{ XNAT\_ID => Label }_ 
 + 
 +    usage: 
 + 
 +            %sbjs = xget_subjects(host, jsession, project); 
 + 
 +- xget\_pet\_reg 
 + 
 +    Download the PET registered into native space in nifti format 
 + 
 +    usage: 
 + 
 +            xget_pet_reg(host, jsession, experiment, nifti_output); 
 + 
 +- xget\_pet\_data 
 + 
 +    Get the PET FBB analysis results into a HASH 
 + 
 +    usage: 
 + 
 +            %xresult = xget_pet_data(host, jsession, experiment); 
 + 
 +xget\_exp\_data 
 + 
 +    Get a data field of an experiment. 
 +    The desired field shoud be indicated as input. 
 +    For example, if you want the date of the experiment this is 
 +    seeked as 
 + 
 +            my $xdate = xget_exp_data($host, $session_id, $experiment, 'date'); 
 + 
 +    There are some common fields as _date_, _label_ or _dcmPatientId_ 
 +    but in general you should look at: 
 + 
 +            curl -X GET -b JSESSIONID=00000blahblah "http://myhost/data/experiments/myexperiment?format=json" 2>/dev/null | jq '.items[0].data_fields' 
 + 
 +    in order to know the available fields 
 + 
 +    usage: 
 + 
 +            $xdata = xget_exp_data(host, jsession, experiment, field); 
 + 
 +xget\_sbj\_data 
 + 
 +    Get the subject's metadata. Not too 
 +    interesting, except to extract 
 +    the subject label
 + 
 +    usage: 
 + 
 +            $xdata = xget_sbj_data(host, jsession, subject, field); 
 +                                                                                                      
 + 
 +</markdown
 +====== Dependencias ====== 
 + 
 +<code
 +Data::Dump 
 +File::Slurp 
 +File::Basename 
 +File::Temp 
 +File::Copy::Recursive 
 +File::Copy 
 +File::Find::Rule 
 +File::Remove 
 +Cwd 
 +Spreadsheet::Write 
 +Text::CSV 
 +File::Path 
 +MIME::Lite 
 +JSON
 </code> </code>
 +
 +
 +
 +
 +
neuroimagen/neuro4.pm.txt · Last modified: 2022/05/27 07:44 by osotolongo