This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision | ||
neuroimagen:neuro4.pm [2019/12/09 09:21] osotolongo |
neuroimagen:neuro4.pm [2022/05/17 07:49] (current) osotolongo [NEURO4] |
||
---|---|---|---|
Line 3: | Line 3: | ||
Esta pagina es solo de referencia de los modulos Perl que dan soporte al pipeline de neuroimagen. | Esta pagina es solo de referencia de los modulos Perl que dan soporte al pipeline de neuroimagen. | ||
- | ===== NEURO4.pm ===== | + | < |
+ | # NEURO4 | ||
- | Subrutinas: | + | This is a set of functions for helping in the pipeline |
- | * sub print_help -> Imprime la ayuda de un script, tomandola del directorio //docs// | + | |
- | * sub escape_name -> Escapa caracteres molestos de los nombres de directorio | + | |
- | * sub trim -> Elimina espacios, antes y despues de un string | + | |
- | * sub check_or_make -> crea un directorio si no existe | + | |
- | * sub load_project -> lee las variables de un proyecto dentro de un hash | + | |
- | * sub check_subj -> lee la estructura BIDS de un sujeto dentro de un hash | + | |
- | * sub check_fs_subj -> comprueba que un sujeto se haya procesado en FS | + | |
- | * sub get_lut -> | + | |
- | * sub run_dckey -> corre //dckey// sobre un DICOM y saca el valor de un tag | + | |
- | * sub dclokey -> devuelve el valor de un tag oculto en un DICOM | + | |
- | * sub centiloid_fbb -> devuelve el valor de Centiloide para un valor de SUVR en FBB | + | |
- | * sub populate -> Lee un archivo de CSV (o texto plano) y devuelve un hash con los valores, segun el patron que se suministra | + | |
- | * sub get_subjects -> Devuelve un array con los sujetos de un proyecto contenidos en un archivo de DB del proyecto | + | |
- | * sub get_list -> Devuelve un array con los sujetos de un proyecto contenidos, ignora el formato posterior | + | |
- | * sub get_pair -> devuelve un hash que contiene una DB de proyecto | + | |
- | * sub shit_done -> Envia un email con un archivo comprimido de attachment | + | |
- | * sub cut_shit -> Tomando como argumentos la DB de un proyecto y una lista simple de sujetos, devuelve un array con los sujetos de la lista incluidos en el proyecto. | + | |
+ | - print\_help | ||
- | <code perl NEURO4.pm> | + | just print the help |
- | # | + | |
- | use strict; use warnings; | + | this function reads the path of a TXT file and prints it at STDOUT |
- | package NEURO4; | + | |
- | require Exporter; | + | |
- | use File::Slurp qw(read_file); | + | |
- | use File:: | + | |
- | use Mail:: | + | |
- | use MIME:: | + | |
- | use File:: | + | |
- | our @ISA = qw(Exporter); | + | usage: |
- | our @EXPORT | + | |
- | our @EXPORT_OK | + | |
- | our %EXPORT_TAGS | + | |
- | usual => [qw(print_help load_project check_or_make cut_shit)], | + | |
- | our $VERSION | + | |
- | sub print_help | + | |
- | # just print the help | + | |
- | my $hlp = shift; | + | |
- | open HELP, "< | + | |
- | while(< | + | |
- | print; | + | |
- | } | + | |
- | close HELP; | + | |
- | return; | + | |
- | } | + | |
- | sub trim { | + | - escape\_name |
- | my $string = shift; | + | |
- | $string =~ s/^\s+//; #trim leading space | + | |
- | $string =~ s/ | + | |
- | return $string; | + | |
- | } | + | |
- | sub check_or_make { | + | This function takes a string and removes some special characters |
- | my $place = shift; | + | in order to escape |
- | # I must check if directory | + | |
- | if(opendir(TEST, | + | |
- | closedir TEST; | + | |
- | }else{ | + | |
- | mkdir $place; | + | |
- | } | + | |
- | } | + | |
- | sub load_project { | + | It returns the escaped string |
- | my $study = shift; | + | |
- | my %stdenv = map {/(.*) = (.*)/; $1=>$2 } read_file $ENV{HOME}."/ | + | |
- | return %stdenv; | + | |
- | } | + | |
- | sub check_subj { | + | usage: |
- | my $proj_path = shift; | + | |
- | my $subj = shift; | + | |
- | my %mri = (' | + | |
- | my $subj_dir = $proj_path.'/ | + | |
- | if( -e $subj_dir && -d $subj_dir){ | + | |
- | my @t1 = find(file => ' | + | |
- | if (-e $t1[0] && -f $t1[0]){ | + | |
- | $mri{' | + | |
- | } | + | |
- | my @t2 = find(file => ' | + | |
- | if (-e $t2[0] && -f $t2[0]){ | + | |
- | $mri{' | + | |
- | } | + | |
- | } | + | |
- | $subj_dir = $proj_path.'/ | + | |
- | if( -e $subj_dir && -d $subj_dir){ | + | |
- | my @dwi_sbref = find(file => ' | + | |
- | if (-e $dwi_sbref[0] && -f $dwi_sbref[0]){ | + | |
- | $mri{' | + | |
- | } | + | |
- | my @dwi = find(file => ' | + | |
- | if (-e $dwi[0] && -f $dwi[0]){ | + | |
- | ($mri{' | + | |
- | } | + | |
- | } | + | |
- | return %mri; | + | |
- | } | + | |
- | sub check_fs_subj { | + | escape_name(string); |
- | my $subj = shift; | + | |
- | my $subj_dir = qx/echo \$SUBJECTS_DIR/; | + | |
- | chomp($subj_dir); | + | |
- | my $place = $subj_dir."/" | + | |
- | my $ok = 0; | + | |
- | # I must check if directory exist. | + | |
- | if( -e $place && -d $place){$ok = 1;} | + | |
- | return $ok; | + | |
- | } | + | |
- | sub get_lut { | + | - trim |
- | my $ifile = shift; | + | |
- | my $patt = ' | + | |
- | my %aseg_data = map {/$patt/; $1=>$2} grep {/^$patt/} read_file $ifile; | + | |
- | return %aseg_data; | + | |
- | } | + | |
- | sub run_dckey{ | + | This function takes a string and remove any trailing spaces after and before the text |
- | my @props = @_; | + | |
- | my $order = "dckey -k $props[1] $props[0] 2\> | + | |
- | print " | + | |
- | my $dckey = qx/ | + | |
- | chomp($dckey); | + | |
- | $dckey =~ s/\s*//g; | + | |
- | return $dckey; | + | |
- | } | + | |
- | sub dclokey{ | + | usage: |
- | my @props = @_; | + | |
- | my $order = " | + | |
- | print " | + | |
- | my $line = qx/ | + | |
- | (my $dckey) = $line =~ / | + | |
- | if($dckey){ | + | |
- | $dckey =~ s/\s*//g; | + | |
- | } | + | |
- | return $dckey; | + | |
- | } | + | |
- | sub centiloid_fbb { | + | trim(string); |
- | my $suvr = shift; | + | |
- | return 153.4*$suvr-154.9; | + | |
- | } | + | |
- | sub populate { | + | - check\_or\_make |
- | my $patt = shift; | + | |
- | my $csv = shift; | + | |
- | my %pdata = map { /^$patt$/; $1 => $2} grep {/^$patt$/} read_file $csv; | + | |
- | return %pdata; | + | |
- | } | + | |
- | sub get_subjects { | + | This is mostly helpless, just takes a path, |
- | my $db = shift; | + | |
- | my @slist = map {/ | + | |
- | return @slist; | + | |
- | } | + | |
- | sub get_list { | + | usage: |
- | my $ifile = shift; | + | |
- | my @slist = map {/ | + | |
- | return @slist; | + | |
- | } | + | |
- | sub get_pair { | + | check_or_make(path); |
- | my $ifile = shift; | + | |
- | my %pet_data = map {/(.*);(.*)/; $1=>$2} read_file $ifile; | + | |
- | return %pet_data; | + | |
- | } | + | |
- | sub shit_done { | + | - inplace |
- | my @adv = @_; | + | |
- | my $msg = MIME::Lite->new( | + | |
- | From => " | + | |
- | To => " | + | |
- | Subject => ' | + | |
- | Type => ' | + | |
- | ); | + | |
- | $msg-> | + | |
- | Type | + | and returns a string with a single path as result of |
- | | + | the concatenation of the first one plus the second one |
- | ); | + | |
- | $msg-> | + | |
- | Type | + | |
- | Path => $adv[2], | + | |
- | Filename => basename($adv[2]), | + | |
- | ); | + | |
- | $msg-> | + | inplace(path, |
- | } | + | |
- | sub cut_shit { | + | - load\_project |
- | my $db = shift; | + | |
- | my $cfile = shift; | + | |
- | my @plist = get_subjects($db); | + | |
- | my @oklist; | + | |
- | if (-e $cfile){ | + | |
- | my @cuts = get_list($cfile); | + | |
- | foreach my $cut (sort @cuts){ | + | |
- | if(grep {/$cut/} @plist){ | + | |
- | push @oklist, $cut; | + | |
- | } | + | |
- | } | + | |
- | }else{ | + | |
- | @oklist = @plist; | + | |
- | } | + | |
- | return @oklist; | + | |
- | } | + | |
- | sub escape_name { | + | This function take the name of a project, reads the configuration file |
- | # in order to escape directory names with a lot of strange symbols | + | that is located at ~/.config/neuro/ and return |
- | my $name = shift; | + | |
- | $name=~s/\ /\\\ /g; | + | |
- | $name=~s/ | + | |
- | $name=~s/ | + | |
- | $name=~s/ | + | |
- | | + | |
- | } | + | |
- | </ | + | usage: |
- | ===== FSMetrics ===== | + | load_project(project_name); |
- | * fs_file_metrics | + | - check\_subj |
- | <code perl FSMetrics.pm> | + | Here the fun begins |
- | # | + | |
- | use strict; use warnings; | + | This function takes as input the name of the project and the subject ID |
- | package FSMetrics; | + | Then it seeks along the BIDS structure for this subject and returns a hash, |
- | require Exporter; | + | |
- | our @ISA = qw(Exporter); | + | |
- | our @EXPORT | + | It should return a single value, except for the T1w images, where an array |
- | our @EXPORT_OK | + | is returned. This was thought this way because mostly a single session is done. |
- | our %EXPORT_TAGS | + | |
- | our $VERSION | + | movement correction when ADNI images are analyzed |
- | ; | + | |
- | sub fs_file_metrics | + | So, for T1w images the returned hash should be asked as |
- | my %stats = ('wmparc_stats' | + | |
- | ' | + | @{$nifti{' |
- | ' | + | |
- | }, | + | but for other kind of image it should asked as |
- | ' | + | |
- | ' | + | |
- | ' | + | |
- | }, | + | usage: |
- | ' | + | |
- | ' | + | check_subj(project_path, |
- | ' | + | |
- | }, | + | - check\_pet |
- | ' | + | |
- | ' | + | This function takes as input the name of the project and the subject ID |
- | ' | + | Then it seeks along the BIDS structure for this subject and returns a hash, |
- | }, | + | containing the PET proper images. |
- | ' | + | |
- | ' | + | If also a tracer is given as input, then the returned hash contains the PET-tau |
- | ' | + | associated to this tracer. This was introduced as part of a project were the subjects |
- | }, | + | were analyzed with different radiotracers. |
- | ' | + | |
- | ' | + | If no tracer is given, it will seek for the FBB PETs. Those PETs are stored as |
- | }, | + | |
- | ' | + | |
- | ' | + | |
- | ' | + | |
- | }, | + | |
- | ' | + | |
- | ' | + | check_pet(project_path, bids_id, $optional_radiotracer); |
- | ' | + | |
- | }, | + | - check\_fs\_subj |
- | ' | + | |
- | ' | + | This function checks if the Freesurfer directory of a given subject |
- | | + | |
- | }, | + | usage: |
- | | + | |
- | ' | + | check_fs_subj(freesurfer_id) |
- | }, | + | |
- | ' | + | - get\_lut |
- | ' | + | |
- | }, | + | I really don't even remember what this shit does |
- | ' | + | |
- | ' | + | - run\_dckey |
- | }, | + | |
- | ' | + | Get the content of a public tag from a DICOM file. |
- | ' | + | |
- | }, | + | |
- | ' | + | |
- | ' | + | run_dckey(key, dicom) |
- | }, | + | |
- | ' | + | - dclokey |
- | ' | + | |
- | }, | + | Get the content of a private tag from a DICOM file. |
- | ' | + | |
- | ' | + | |
- | }, | + | |
- | ' | + | dclokey(key, dicom) |
- | ' | + | |
- | }, | + | - centiloid\_fbb |
- | 'rh.a2009s.meancurv' | + | |
- | ' | + | Returns the proper centiloid value for a given SUVR. |
- | }, | + | Only valid for FBB. |
- | ' | + | |
- | ' | + | usage: |
- | }, | + | |
- | ' | + | centiloid_fbb(suvr); |
- | | + | |
- | }, | + | - populate |
- | ' | + | |
- | ' | + | Takes a pattern and a filename and stores the content of the file |
- | }, | + | into a HASH according to the given pattern |
- | ' | + | |
- | ' | + | usage: |
- | }, | + | |
- | ' | + | populate(pattern, filename); |
- | ' | + | |
- | }, | + | - get\_subjects |
- | ' | + | |
- | ' | + | Parse a project database taking only the subjects |
- | }, | + | The database is expected to be built as, |
- | ' | + | |
- | ' | + | |
- | }, | + | |
- | | + | usage: |
- | ' | + | |
- | }, | + | get_subjects(filename); |
- | ' | + | |
- | ' | + | - get\_list |
- | }, | + | |
- | | + | Parse a project database taking only the subjects |
- | ' | + | The database is expected to be built with a four-digit number at the beginning of |
- | }, | + | line. Is similar to get\_subjects() function but less restrictive |
- | ' | + | |
- | ' | + | usage: |
- | }, | + | |
- | ' | + | get_list(filename); |
- | ' | + | |
- | }, | + | - get\_pair |
- | ); | + | |
- | return %stats; | + | A single file is loaded as input and parse into a HASH. |
- | } | + | The file should be written in the format: |
+ | |||
+ | key;value | ||
+ | |||
+ | usage: | ||
+ | |||
+ | get_pair(filename); | ||
+ | |||
+ | - shit\_done | ||
+ | |||
+ | this function is intended to be used after a script ends | ||
+ | and then an email is send to the user | ||
+ | with the name of the script, the name of the project and the results attached | ||
+ | |||
+ | | ||
+ | |||
+ | shit_done(script_name, project_name, | ||
+ | |||
+ | - cut\_shit | ||
+ | |||
+ | This function takes a project database and a file with a list, then | ||
+ | returns the elements that are common to both. | ||
+ | It is intended to be used to restrict the scripts action | ||
+ | over a few elements. It returns a single array. | ||
+ | |||
+ | If it is correctly used, first the db is identified with | ||
+ | | ||
+ | to get the array of subjects to be analyzed. If the file with | ||
+ | the cutting list do not exist, an array with all the subjects | ||
+ | is returned. | ||
+ | |||
+ | usage: | ||
+ | |||
+ | cut_shit(db, list); | ||
+ | |||
+ | - getLoggingTime | ||
+ | |||
+ | This function returns a timestamp based string intended to be used | ||
+ | to make unique filenames | ||
+ | |||
+ | Stolen from Stackoverflow | ||
+ | |||
+ | usage: | ||
+ | |||
+ | getLoggingTime(); | ||
+ | |||
+ | </markdown> | ||
+ | |||
+ | <markdown> | ||
+ | |||
+ | # FSMetrics | ||
+ | |||
+ | Bunch of helpers for storing ROI structure and relative data | ||
+ | |||
+ | - fs\_file\_metrics | ||
+ | |||
+ | This function does not read any input. It sole purpose is to | ||
+ | | ||
+ | | ||
+ | |||
+ | Any hash element is composed by the template (' | ||
+ | if the FS stats will be processed and the name of the FS stat file ('file'). | ||
+ | | ||
+ | parsed and changed by the FS subject id and the output directory where the | ||
+ | data tables will be stored for each subject | ||
+ | |||
+ | The function could be invoked as, | ||
+ | |||
+ | my %stats = fs_file_metrics(); | ||
+ | |||
+ | in any script where this information would be needed. | ||
+ | |||
+ | The boolean element could be used to choose the stats that should | ||
+ | be processed and can be added or modified even at run time if needed. The | ||
+ | stored booleans only provided a decent default | ||
+ | |||
+ | - fs\_fbb\_rois | ||
+ | |||
+ | _deprecated_ | ||
+ | |||
+ | This function exports a HASH that contains the Freesurfer composition of the | ||
+ | usual segmentations used for building the SUVR ROI | ||
+ | |||
+ | - tau\_rois | ||
+ | |||
+ | This function takes a string as input and returns an ARRAY containing | ||
+ | the list of ROIs that should be build and where the SUVR should be calculated | ||
+ | |||
+ | It is intended to be used for PET-Tau but could be used anywhere | ||
+ | |||
+ | By default a list of Braak areas are returned. If the input string is **alt** | ||
+ | a grouping of those Braak areas is returned. If the purpose is to build | ||
+ | a meta\_temporal ROI the string **meta** should be passed as input | ||
+ | |||
+ | The main idea here is read the corresponding file for each ROI, stored at | ||
+ | `PIPEDIR/ | ||
+ | |||
+ | - pet\_rois | ||
+ | |||
+ | This function takes a string as input and returns an ARRAY containing | ||
+ | the list of ROIs that should be build and where the SUVR should be calculated | ||
+ | |||
+ | Input values are **parietal**, **frontal**, | ||
+ | |||
+ | The main idea here is read the corresponding file for each ROI, stored at | ||
+ | | ||
+ | |||
+ | </ | ||
+ | |||
+ | <markdown> | ||
+ | # SLURM | ||
+ | |||
+ | This module contains just a function to send the jobs to SLURM | ||
+ | from the Perl scripts | ||
+ | |||
+ | - send2slurm | ||
+ | |||
+ | The function takes a HASH as input where all the information | ||
+ | relative to the job should be stored. No data is mandatory | ||
+ | inside the input HASH, since the minimal values are automagically | ||
+ | assigned by default as a constructor (no really, but anyway). | ||
+ | |||
+ | Take into account that this subroutine only pass the parameters | ||
+ | to SLURM. So, the logic behind your actions should correspond | ||
+ | to what you want to do in any case, exactly as if you were | ||
+ | | ||
+ | |||
+ | The managed options for SLURM jobs are: | ||
+ | |||
+ | | ||
+ | | ||
+ | | ||
+ | | ||
+ | - time: Maximum time that the job will be allowed to run (--time) | ||
+ | | ||
+ | | ||
+ | | ||
+ | | ||
+ | - mailtype: Type of warning to be emailed (--mail-type) | ||
+ | | ||
+ | |||
+ | The function returns the jobid of the queued job, so it can be used to | ||
+ | build complex workflows. | ||
+ | |||
+ | usage: my $job\_id | ||
+ | |||
+ | Warning email: By default, if an empty HASH is passed to the function, | ||
+ | a no command sbatch script is launched | ||
+ | with _--mail-type=END_ option. The intention is that this could be used to | ||
+ | warn at the end of any launched swarm. Also, by changing **mailtype** but | ||
+ | omitting the **command** value you can force the function to execute | ||
+ | an empty sbatch job with whatever warning behavior that you choose. | ||
+ | |||
+ | Dependencies: | ||
+ | the function the full string that SLURM expects. That is, you can pass something | ||
+ | like _singleton_ or _after: | ||
+ | This last can be build, by example, storing every previous jobid into an ARRAY | ||
+ | and passing then as, | ||
+ | |||
+ | ... | ||
+ | my $jobid = send2slurm(\%previous); | ||
+ | push @jobids, $jobid; | ||
+ | ... | ||
+ | $task{'dependency' | ||
+ | | ||
+ | send2slurm(\%task); | ||
+ | |||
+ | Of course, if dependencies are not going to be used, the | ||
+ | | ||
+ | | ||
+ | </ | ||
+ | |||
+ | <markdown> | ||
+ | # XNATACE | ||
+ | |||
+ | - xconf | ||
+ | |||
+ | Publish path of xnatapic configuration file | ||
+ | |||
+ | usage: | ||
+ | |||
+ | $path = xconf(); | ||
+ | |||
+ | - xget\_conf | ||
+ | |||
+ | Get the XNAT connection data into a HASH | ||
+ | |||
+ | usage: | ||
+ | |||
+ | %xnat_data = xget_conf() | ||
+ | |||
+ | - xget\_pet | ||
+ | |||
+ | Get the XNAT PET experiment ID | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_pet(host, | ||
+ | |||
+ | - xget\_mri | ||
+ | |||
+ | Get the XNAT MRI experiment ID | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_mri(host, | ||
+ | |||
+ | - xget\_fs\_data | ||
+ | Get the full Freesurfer directory in a tar.gz file | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_fs_data(host, jsession, project, experiment, output_path) | ||
+ | |||
+ | - xget\_fs\_stats | ||
+ | |||
+ | Get a single stats file from Freesurfer segmentation | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_fs_stats(host, jsession, experiment stats_file, output_file) | ||
+ | |||
+ | - xget\_session | ||
+ | |||
+ | Create a new JSESSIONID on XNAT. Return the connection data | ||
+ | for the server AND the ID of the created session | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_session(); | ||
+ | |||
+ | - xput\_report | ||
+ | |||
+ | Upload a pdf report to XNAT | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xput_report(host, | ||
+ | |||
+ | - xput\_rvr | ||
+ | |||
+ | Upload a JSON file with VR data | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xput_rvr(host, | ||
+ | |||
+ | - xget\_rvr | ||
+ | |||
+ | Get VR results into a HASH. Output is a hash with filenames and URI of each element stored at RVR | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_rvr(host, jsession, project, experiment); | ||
+ | |||
+ | - xget\_rvr\_data | ||
+ | |||
+ | Get RVR JSON data into a hash | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_rvr_data(host, jsession, URI); | ||
+ | |||
+ | - xget\_subjects | ||
+ | |||
+ | Get the list of subjects of a project into a HASH. | ||
+ | El HASH de input, _%sbjs_, se construye como _{ XNAT\_ID | ||
+ | |||
+ | usage: | ||
+ | |||
+ | %sbjs = xget_subjects(host, jsession, project); | ||
+ | |||
+ | - xget\_pet\_reg | ||
+ | |||
+ | Download the PET registered into native space in NIfTI format | ||
+ | |||
+ | usage: | ||
+ | |||
+ | xget_pet_reg(host, jsession, experiment, nifti_output); | ||
+ | |||
+ | - xget\_pet\_data | ||
+ | |||
+ | Get the PET FBB analysis results into a HASH | ||
+ | |||
+ | usage: | ||
+ | |||
+ | %xresult | ||
+ | |||
+ | - xget\_exp\_data | ||
+ | |||
+ | Get a data field of an experiment. | ||
+ | The desired field should be indicated as input. | ||
+ | By example, if you want the date of the experiment this is | ||
+ | | ||
+ | |||
+ | my $xdate = xget_exp_data($host, | ||
+ | |||
+ | There are some common fields as _date_, _label_ or _dcmPatientId_ | ||
+ | but in general | ||
+ | |||
+ | curl -X GET -b JSESSIONID=00000blahblah | ||
+ | |||
+ | in order to know the available fields | ||
+ | |||
+ | usage: | ||
+ | |||
+ | $xdata | ||
+ | |||
+ | - xget\_sbj\_data | ||
+ | |||
+ | Get the subjects | ||
+ | much interesting but to extract | ||
+ | the subject label. | ||
+ | |||
+ | usage: | ||
+ | |||
+ | $xdata = xget_sbj_data(host, jsession, subject, field); | ||
+ | |||
+ | |||
+ | </ | ||
+ | ====== Dependencias ====== | ||
+ | |||
+ | <code> | ||
+ | Data::Dump | ||
+ | File::Slurp | ||
+ | File:: | ||
+ | File:: | ||
+ | File:: | ||
+ | File:: | ||
+ | File:: | ||
+ | File:: | ||
+ | Cwd | ||
+ | Spreadsheet:: | ||
+ | Text::CSV | ||
+ | File:: | ||
+ | MIME:: | ||
+ | JSON | ||
</ | </ | ||
+ | |||
+ | |||
+ | |||
+ | |||
+ |