This page is just a reference for the Perl modules that support the neuroimaging pipeline.
  
<markdown>
NEURO4

This is a set of functions that help in the pipeline.
  
- print\_help

    Just prints the help.

    This function reads the path of a TXT file and prints it to STDOUT.

    usage:

            print_help(help_file);
  
- escape\_name

    This function takes a string and removes some special characters,
    in order to escape directory names full of strange symbols.

    It returns the escaped string.

    usage:

            escape_name(string);
  
- trim

    This function takes a string and removes any leading and trailing spaces around the text.

    usage:

            trim(string);
  
- check\_or\_make

    This one is mostly trivial: it just takes a path,
    checks whether it exists and creates it otherwise.

    usage:

            check_or_make(path);
  
- inplace

    This function takes a path and a file name (or two paths)
    and returns a single path string: the concatenation of
    the first one plus the second one.

    usage:

            inplace(path, filename);
  
- load\_project

    This function takes the name of a project, reads the configuration file
    located at ~/.config/neuro/ and returns the project configuration
    stored as a hash that can be used in the scripts.

    usage:

            load_project(project_name);
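
    A minimal sketch of how this is typically used. The configuration file
    holds one _name = value_ pair per line; the DATA key here is only an
    illustrative example, not a fixed schema:

            use NEURO4 qw(load_project);

            # reads ~/.config/neuro/myproject.cfg into a hash
            my %std = load_project('myproject');
            # every "name = value" line becomes a hash entry
            print $std{'DATA'}, "\n";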
  
- check\_subj

    Here the fun begins.

    This function takes as input the project path and the subject ID.
    Then it seeks along the BIDS structure of this subject and returns a hash
    containing the proper MRI images.

    It should return a single value for each key, except for the T1w images,
    where an array is returned. It was thought this way because mostly a single
    session is done. However, the ability to detect more than one MRI was
    introduced to allow the movement correction when ADNI images are analyzed.

    So, for T1w images the returned hash should be asked as

            @{$nifti{'T1w'}}

    but for any other kind of image it should be asked as

            $nifti{'T2w'}

    usage:

            check_subj(project_path, bids_id);
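
    For example, a minimal sketch (the %nifti variable and the paths are
    illustrative; the hash keys follow the BIDS modality names):

            my %nifti = check_subj('/the/project/path', '0001');
            if ($nifti{'T1w'}) {
                    # T1w holds an array reference, one element per acquired image
                    foreach my $t1 (@{$nifti{'T1w'}}) {
                            print "T1w: $t1\n";
                    }
            }
            # any other modality holds a single path
            print "T2w: $nifti{'T2w'}\n" if $nifti{'T2w'};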
  
- check\_pet

    This function takes as input the project path and the subject ID.
    Then it seeks along the BIDS structure of this subject and returns a hash
    containing the proper PET images.

    If a tracer is also given as input, then the returned hash contains the PET-tau
    images associated with this tracer. This was introduced as part of a project where
    the subjects were analyzed with different radiotracers.

    If no tracer is given, it will seek the FBB PETs. Those PETs are stored as

            - single: 4x5min
            - combined: 20min

    usage:

            check_pet(project_path, bids_id, $optional_radiotracer);

- check\_fs\_subj

    This function checks if the Freesurfer directory of a given subject exists.

    usage:

            check_fs_subj(freesurfer_id);

- get\_lut

    I really don't even remember what this shit does. (Judging from the code,
    it seems to parse a FreeSurfer LUT file into an {index => label} HASH.)

- run\_dckey

    Gets the content of a public tag from a DICOM file.

    usage:

            run_dckey(key, dicom);

- dclokey

    Gets the content of a private tag from a DICOM file.

    usage:

            dclokey(key, dicom);
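
    For example (a sketch; the tag keyword and element shown are only
    illustrative, run\_dckey wraps _dckey_ and dclokey greps the output
    of _dcdump_):

            # public tag, by keyword
            my $station = run_dckey('StationName', '/path/to/file.dcm');
            # private tag, by element string as printed by dcdump
            my $value = dclokey('(0x0019,0x100c)', '/path/to/file.dcm');
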
- centiloid\_fbb

    Returns the proper Centiloid value for a given SUVR.
    Only valid for FBB.

    usage:

            centiloid_fbb(suvr);
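
    A worked example, using the linear conversion from the module source
    (Centiloid = 153.4 * SUVR - 154.9):

            my $cl = centiloid_fbb(1.5);    # 153.4 * 1.5 - 154.9 = 75.2
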
- populate

    Takes a pattern and a filename and stores the content of the file
    into a HASH according to the given pattern.

    usage:

            populate(pattern, filename);
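
    The pattern should capture two groups, which become the key and the
    value of each hash entry. A sketch with a hypothetical CSV:

            # suvr.csv contains lines like: 0001;1.234
            my %suvr = populate('(\d{4});(.*)', 'suvr.csv');
            # %suvr = ( '0001' => '1.234', ... )
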
- get\_subjects

    Parses a project database, taking only the subjects and storing them into an array.
    The database is expected to be built as,

            0000;name

    usage:

            get_subjects(filename);

- get\_list

    Parses a project database, taking only the subjects and storing them into an array.
    The database is expected to be built with a four digit number at the beginning of
    each line. It is similar to the get\_subjects() function but less restrictive.

    usage:

            get_list(filename);

- get\_pair

    A single file is loaded as input and parsed into a HASH.
    The file should be written in the format:

            key;value

    usage:

            get_pair(filename);

- shit\_done

    This function is intended to be used after a script ends:
    an email is sent to the user with the name of the script,
    the name of the project and the results attached.

    usage:

            shit_done(script_name, project_name, attached_file);
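
    For example (a sketch; the tarball path is illustrative):

            use File::Basename qw(basename);

            # mail myself the compressed results when the script finishes
            shit_done(basename($0), 'myproject', '/tmp/myproject_results.tar.gz');
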
- cut\_shit

    This function takes a project database and a file with a list, then
    returns the elements that are common to both.
    It is intended to restrict the action of the scripts
    to a few elements. It returns a single array.

    If correctly used, first the db is identified with the
    _load\_project()_ function and then passed through this function
    to get the array of subjects to be analyzed (see the sketch below).
    If the file with the cutting list does not exist, an array with
    all the subjects is returned.

    usage:

            cut_shit(db, list);
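
    A minimal sketch of that workflow (the DATA key and the file names
    are illustrative):

            use NEURO4 qw(load_project cut_shit);

            my %std = load_project('myproject');
            # all subjects in the project DB, unless my_list.txt narrows it down
            my @subjects = cut_shit($std{'DATA'}.'/myproject_mri.csv', 'my_list.txt');
            foreach my $subject (@subjects) {
                    # ... process each subject here
            }
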
- getLoggingTime

    This function returns a timestamp-based string intended to be used
    to make unique filenames.

    Stolen from Stackoverflow.

    usage:

            getLoggingTime();

</markdown>

<markdown>
FSMetrics

Bunch of helpers for storing ROI structure and related data.

- fs\_file\_metrics

    This function does not read any input. Its sole purpose is to
    return a HASH containing the templates of the orders for converting
    Freesurfer (FS) results into tables.

    Each hash element is composed of the template ('order'), a boolean ('active') to decide
    if the FS stats will be processed, and the name of the FS stats file ('file').
    The order template has two wildcards (<list> and <fs\_output>) that should be
    parsed and changed to the FS subject id and the output directory where the
    data tables will be stored for each subject.

    The function can be invoked as,

            my %stats = fs_file_metrics();

    in any script where this information is needed.

    The boolean element can be used to choose the stats that should
    be processed, and it can be added or modified even at run time if needed. The
    stored booleans only provide a decent default.
- fs\_fbb\_rois

    _deprecated_

    This function exports a HASH that contains the Freesurfer composition of the
    usual segmentations used for building the SUVR ROI.

- tau\_rois

    This function takes a string as input and returns an ARRAY containing
    the list of ROIs that should be built and where the SUVR should be calculated.

    It is intended to be used for PET-Tau but could be used anywhere.

    By default a list of Braak areas is returned. If the input string is **alt**,
    a grouping of those Braak areas is returned. If the purpose is to build
    a meta\_temporal ROI, the string **meta** should be passed as input.

    The main idea here is to read the corresponding file for each ROI, stored at
    `PIPEDIR/lib/tau/`, and build each ROI with the FS LUTs stored there.

- pet\_rois

    This function takes a string as input and returns an ARRAY containing
    the list of ROIs that should be built and where the SUVR should be calculated.

    Input values are **parietal**, **frontal**, **pieces** or **global** (default).

    The main idea here is to read the corresponding file for each ROI, stored at
    `PIPEDIR/lib/pet/`, and build each ROI with the FS LUTs stored there.

</markdown>

<markdown>
SLURM

This module contains just one function, which sends jobs to SLURM
from the Perl scripts.

- send2slurm

    The function takes a HASH as input, where all the information
    relative to the job should be stored. No data is mandatory
    inside the input HASH, since the minimal values are automagically
    assigned by default, as in a constructor (not really, but anyway).

    Take into account that this subroutine only passes the parameters
    to SLURM. So the logic behind your actions should correspond
    to what you want to do in any case, exactly as if you were
    writing sbatch scripts.

    The managed options for SLURM jobs are:

            - filename: File where the sbatch script will be stored
            - job_name: Job name for SLURM (-J)
            - cpus: Number of CPUs to be used by each job (-c)
            - mem_per_cpu: Amount of memory to be used for each CPU (--mem-per-cpu)
            - time: Maximum time that the job will be allowed to run (--time)
            - output: File where the sbatch script output will be stored (-o)
            - partition: SLURM partition to be used (-p)
            - gres: GPUs to be used (--gres)
            - command: Command to be executed by the sbatch script
            - mailtype: Type of warning to be emailed (--mail-type)
            - dependency: Full dependency string to be used at sbatch execution (--dependency), see more below

    The function returns the job id of the queued job, so it can be used to
    build complex workflows.

    usage:

            my $job_id = send2slurm(\%job_properties);
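
    For example, a typical job definition might look like this (all the
    values shown are illustrative):

            my %task = (
                    'job_name'    => 'fs_recon',
                    'cpus'        => 4,
                    'mem_per_cpu' => '4G',
                    'time'        => '24:0:0',
                    'filename'    => 'fs_recon.sh',
                    'output'      => 'fs_recon.out',
                    'command'     => 'recon-all -subjid sub-0001 -all',
            );
            my $job_id = send2slurm(\%task);
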
    Warning email: by default, if an empty HASH is passed to the function,
    an sbatch script without any command is launched,
    with the _--mail-type=END_ option. The intention is that this can be used to
    warn at the end of any launched swarm. Also, by changing **mailtype** but
    omitting the **command** value, you can force the function to execute
    an empty sbatch job with whatever warning behavior you choose.

    Dependencies: if dependencies are going to be used, you need to pass to
    the function the full string that SLURM expects. That is, you can pass something
    like _singleton_ or _after:000000_ or even _afterok:000000,000001,000002_.
    This last one can be built, for example, by storing every previous job id into
    an ARRAY and then passing them as,

            ...
                    my $jobid = send2slurm(\%previous);
                    push @jobids, $jobid;
            ...
            $task{'dependency'} = 'afterok:'.join(',', @jobids);
            ...
            send2slurm(\%task);

    Of course, if dependencies are not going to be used, the
    **dependency** option can be safely ignored. But notice that, if you are
    reusing a HASH, then this key should be deleted from it.
</markdown>

<markdown>
XNATACE

- xconf

    Publishes the path of the xnatapic configuration file.

    usage:

            $path = xconf();

- xget\_conf

    Gets the XNAT connection data into a HASH.

    usage:

            %xnat_data = xget_conf();

- xget\_pet

    Gets the XNAT PET experiment ID.

    usage:

            xget_pet(host, jsession, project, subject);

- xget\_mri

    Gets the XNAT MRI experiment ID.

    usage:

            xget_mri(host, jsession, project, subject);

- xget\_fs\_data

    Gets the full Freesurfer directory in a tar.gz file.

    usage:

            xget_fs_data(host, jsession, project, experiment, output_path);

- xget\_fs\_stats

    Gets a single stats file from the Freesurfer segmentation.

    usage:

            xget_fs_stats(host, jsession, experiment, stats_file, output_file);

- xget\_session

    Creates a new JSESSIONID on XNAT. Returns the connection data
    for the server AND the ID of the created session.

    usage:

            xget_session();

- xput\_report

    Uploads a PDF report to XNAT.

    usage:

            xput_report(host, jsession, subject, experiment, pdf_file);

- xput\_rvr

    Uploads a JSON file with VR data.

    usage:

            xput_rvr(host, jsession, experiment, json_file);

- xget\_rvr

    Gets VR results into a HASH. The output is a hash with the filename and URI
    of each element stored at RVR.

    usage:

            xget_rvr(host, jsession, project, experiment);

- xget\_rvr\_data

    Gets RVR JSON data into a HASH.

    usage:

            xget_rvr_data(host, jsession, URI);

- xget\_subjects

    Gets the list of subjects of a project into a HASH.
    The returned HASH, _%sbjs_, is built as _{ XNAT\_ID => Label }_.

    usage:

            %sbjs = xget_subjects(host, jsession, project);
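
    A typical connection sketch (the HOST and JSESSION keys of the hash
    returned by xget\_session() are assumed key names, shown here only
    for illustration):

            use XNATACE qw(xget_session xget_subjects);

            # open a new JSESSIONID and get the connection data
            my %xconf = xget_session();
            my %sbjs = xget_subjects($xconf{'HOST'}, $xconf{'JSESSION'}, 'myproject');
            foreach my $xid (sort keys %sbjs) {
                    print "$xid -> $sbjs{$xid}\n";
            }
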
- xget\_pet\_reg

    Downloads the PET registered into native space, in NIfTI format.

    usage:

            xget_pet_reg(host, jsession, experiment, nifti_output);

- xget\_pet\_data

    Gets the PET FBB analysis results into a HASH.

    usage:

            %xresult = xget_pet_data(host, jsession, experiment);

- xget\_exp\_data

    Gets a data field of an experiment.
    The desired field should be indicated as input.
    For example, if you want the date of the experiment, it is
    sought as

            my $xdate = xget_exp_data($host, $session_id, $experiment, 'date');

    There are some common fields such as _date_, _label_ or _dcmPatientId_,
    but in general you should look at,

            curl -X GET -b JSESSIONID=00000blahblah "http://myhost/data/experiments/myexperiment?format=json" 2>/dev/null | jq '.items[0].data_fields'

    in order to know the available fields.

    usage:

            $xdata = xget_exp_data(host, jsession, experiment, field);

- xget\_sbj\_data

    Gets the subject's metadata. Not too
    interesting, except to extract
    the subject label.

    usage:

            $xdata = xget_sbj_data(host, jsession, subject, field);

</markdown>

====== Dependencies ======

<code>
Data::Dump
File::Slurp
File::Basename
File::Temp
File::Copy::Recursive
File::Copy
File::Find::Rule
File::Remove
Cwd
Spreadsheet::Write
Text::CSV
File::Path
MIME::Lite
JSON
</code>