====== NEURO4 ======

This page is for reference only

<code markdown>
# NEURO4

This is a set of functions to help in the pipeline

- print\_help

just print the help

this function reads the path of a TXT file and prints it to STDOUT

usage:

print_help(help_file);

- escape\_name

This function takes a string and removes some special characters
in order to escape directory names with a lot of strange symbols.

It returns the escaped string

usage:

escape_name(string);

- trim

This function takes a string and removes any trailing spaces before and after the text

usage:

trim(string);

- check\_or\_make

This is mostly helpless, just takes a path and creates the directory if it does not exist

usage:

check_or_make(path);

- inplace

This function takes a path and a filename
and returns a string with a single path as result of
the concatenation of the first one plus the second one

usage:

inplace(path, filename);

- load\_project

This function takes the name of a project, reads the configuration file
that is located at ~/ and returns the project variables
stored as a hash that can be used in the scripts

usage:

load_project(project_name);
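
The configuration file is a plain list of key = value lines, so the
returned hash can be queried directly. A short sketch (the project
name is illustrative):

use NEURO4 qw(load_project);

my %std = load_project('myproject');
# every "key = value" line of the config file becomes a hash entry
foreach my $key (sort keys %std) {
    print "$key = $std{$key}\n";
}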

- check\_subj

Here the fun begins

This function takes as input the name of the project and the subject ID.
Then it seeks along the BIDS structure for this subject and returns a hash
containing the proper MRI images.

It should return a single value, except for the T1w images, where an array
is returned. It was thought this way because mostly a single session is done.
However, the ability to detect more than one MRI was introduced to allow the
movement correction when ADNI images are analyzed

So, for T1w images the returned hash should be asked as

@{$nifti{'T1w'}}

but for any other kind of image it should be asked as

$nifti{'T2w'}

usage:

check_subj(project_path, subject);
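
A minimal sketch of how the returned hash might be consumed (the
subject ID is hypothetical, and the key names follow the BIDS
modality labels used above):

use NEURO4 qw(check_subj);

my %nifti = check_subj('/the/project/path', '0001');
# T1w images come as an array reference
foreach my $t1 (@{$nifti{'T1w'}}) {
    print "T1w image: $t1\n";
}
# other modalities are single values
print "T2w image: $nifti{'T2w'}\n" if $nifti{'T2w'};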

- check\_pet

This function takes as input the name of the project and the subject ID.
Then it seeks along the BIDS structure for this subject and returns a hash
containing the proper PET images.

If a tracer is also given as input, then the returned hash contains the PET-tau
associated to this tracer. This was introduced as part of a project where the subjects
were analyzed with different radiotracers.

If no tracer is given, it will seek for the FBB PETs. Those PETs are stored as

- single: 4x5min
- combined: 20min

usage:

check_pet(project_path, subject[, tracer]);
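
A sketch of both calling modes (the subject ID and the 'FTP' tracer
label are hypothetical, and the hash keys are assumed from the storage
naming above):

use NEURO4 qw(check_pet);

# without tracer: seek the FBB PETs
my %fbb = check_pet('/the/project/path', '0001');
print "Combined (20min) PET: $fbb{'combined'}\n" if $fbb{'combined'};

# with tracer: seek the PET-tau acquired with that radiotracer
my %tau = check_pet('/the/project/path', '0001', 'FTP');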

- check\_fs\_subj

This function checks if the Freesurfer directory of a given subject exists

usage:

check_fs_subj(freesurfer_id)

- get\_lut

I really don't even remember what this shit does

- run\_dckey

Get the content of a public tag from a DICOM file.

usage:

run_dckey(key, dicom);

- dclokey

Get the content of a private tag from a DICOM file.

usage:

dclokey(key, dicom);

- centiloid\_fbb

Returns the proper centiloid value for a given SUVR.
Only valid for FBB.

usage:

centiloid_fbb(suvr);
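
The conversion is the linear transform hardcoded in the module source,
so a quick check can be done by hand:

use NEURO4 qw(centiloid_fbb);

my $suvr = 1.45;
# centiloid = 153.4 * SUVR - 154.9 = 67.53 for SUVR = 1.45
my $cl = centiloid_fbb($suvr);
printf "SUVR %.2f -> %.2f centiloids\n", $suvr, $cl;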

- populate

Takes a pattern and a filename and stores the content of the file
into a HASH according to the given pattern

usage:

populate(pattern, filename);
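
The pattern is expected to capture two groups, the first used as key
and the second as value of the returned HASH. A sketch with a
hypothetical semicolon separated file:

use NEURO4 qw(populate);

# ages.csv (hypothetical content):
#   0001;73
#   0002;68
my %ages = populate('(\d{4});(\d+)', 'ages.csv');
print "Subject 0001 is $ages{'0001'} years old\n";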

- get\_subjects

Parse a project database taking only the subjects and storing them into an array.
The database is expected to be built as,

0000;name

usage:

get_subjects(filename);

- get\_list

Parse a project database taking only the subjects and storing them into an array.
The database is expected to be built with a four digit number at the beginning of each
line. It is similar to the get\_subjects() function but less restrictive

usage:

get_list(filename);
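
A sketch showing both parsers over hypothetical files:

use NEURO4 qw(get_subjects get_list);

# project.csv lines look like: 0001;name
my @subjects = get_subjects('project.csv');

# the list file only needs a four digit number at the beginning of each line
my @wanted = get_list('wanted.txt');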

- get\_pair

A single file is loaded as input and parsed into a HASH.
The file should be written in the format:

key;value

usage:

get_pair(filename);
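
A sketch, assuming a hypothetical ids.csv file:

use NEURO4 qw(get_pair);

# ids.csv contains key;value lines
my %pairs = get_pair('ids.csv');
while (my ($key, $value) = each %pairs) {
    print "$key -> $value\n";
}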

- shit\_done

this function is intended to be used when a script ends,
sending an email to the user
with the name of the script, the name of the project and the results attached

usage:

shit_done(script_name, project_name, attachment);

- cut\_shit

This function takes a project database and a file with a list, then
returns the elements that are common to both.
It is intended to be used to restrict the scripts action
over a few elements. It returns a single array.

If it is correctly used, first the db is identified with the
_load\_project()_ function and then passed through this function
to get the array of subjects to be analyzed. If the file with
the cutting list does not exist, an array with all the subjects
is returned.

usage:

cut_shit(db, list);
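
A typical opening sequence of a script would then look like this (the
DATA key that points to the project database is an illustrative
assumption):

use NEURO4 qw(load_project cut_shit);

my %std = load_project('myproject');
# restrict the run to the subjects listed in wanted.txt;
# if the file does not exist, all the subjects are returned
my @subjects = cut_shit($std{'DATA'}, 'wanted.txt');
foreach my $subject (@subjects) {
    # do the real work here
}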

- getLoggingTime

This function returns a timestamp based string intended to be used
to make unique filenames

Stolen from Stackoverflow

usage:

getLoggingTime();

</code>

<code markdown>

# FSMetrics

Bunch of helpers for storing ROI structure and relative data

- fs\_file\_metrics

This function does not read any input. Its sole purpose is to
return a HASH containing the templates of the orders for converting Freesurfer (FS)
results into tables.

Each hash element is composed of the order template, a boolean that says
if the FS stats will be processed, and the name of the FS stats file.
The order template has two wildcards that should be
parsed and changed into the FS subject id and the output directory where the
data tables will be stored for each subject

The function could be invoked as,

my %stats = fs_file_metrics();

in any script where this information would be needed.

The boolean element could be used to choose the stats that should
be processed and can be added or modified even at run time if needed. The
stored booleans only provide a decent default
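
For example, the booleans could be switched at run time before
processing. This is only a sketch: the 'wmparc', 'active' and 'order'
key names are illustrative assumptions, the real ones are defined in
the module source:

use FSMetrics qw(fs_file_metrics);

my %stats = fs_file_metrics();
# hypothetical: skip the wmparc stats on this run
$stats{'wmparc'}{'active'} = 0 if exists $stats{'wmparc'};
foreach my $stat (grep { $stats{$_}{'active'} } sort keys %stats) {
    # parse the wildcards of $stats{$stat}{'order'} and run it
}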

- fs\_fbb\_rois

_deprecated_

This function exports a HASH that contains the Freesurfer composition of the
usual segmentations used for building the SUVR ROI

- tau\_rois

This function takes a string as input and returns an ARRAY containing
the list of ROIs that should be built and where the SUVR should be calculated

It is intended to be used for PET-Tau but could be used anywhere

By default a list of Braak areas is returned. If the input string is **alt**
a grouping of those Braak areas is returned. If the purpose is to build
a meta\_temporal ROI the string **meta** should be passed as input

The main idea here is to read the corresponding file for each ROI, stored under
`PIPEDIR/`

- pet\_rois

This function takes a string as input and returns an ARRAY containing
the list of ROIs that should be built and where the SUVR should be calculated

Input values include **parietal**

The main idea here is to read the corresponding file for each ROI, stored under
`PIPEDIR/`

</code>

<code markdown>
# SLURM

This module contains just a function to send jobs to SLURM
from the Perl scripts

- send2slurm

The function takes a HASH as input where all the information
relative to the job should be stored. No data is mandatory
inside the input HASH, since the minimal values are automagically
assigned by default as a constructor (not really, but anyway).

Take into account that this subroutine only passes the parameters
to SLURM. So, the logic behind your actions should correspond
to what you want to do in any case, exactly as if you were
writing sbatch scripts.

The managed options for SLURM jobs are:

- filename: File where the sbatch script will be stored
- job_name: Job name for SLURM (-J)
- cpus: Number of CPUs to be used by each job (-c)
- mem_per_cpu: Memory to be allocated per CPU (--mem-per-cpu)
- time: Maximum time that the job will be allowed to run (--time)
- output: File where the sbatch script output will be stored (-o)
- partition: SLURM partition to be used (-p)
- gres: GPUs to be used (--gres)
- command: Command to be executed at sbatch script
- mailtype: Type of warning to be emailed (--mail-type)
- dependency: Full dependency string to be used at sbatch execution (--dependency), see below

The function returns the jobid of the queued job, so it can be used to
build complex workflows.

usage: my $job\_id = send2slurm(\\%job\_properties);
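
A minimal sketch, using only the documented options (every value here
is illustrative):

use SLURM qw(send2slurm);

my %job = (
    'filename' => '/tmp/recon_0001.sh',
    'job_name' => 'recon',
    'cpus'     => 4,
    'time'     => '24:0:0',
    'output'   => '/tmp/recon_0001-%j.out',
    'command'  => 'recon-all -all -subjid 0001',
);
my $jobid = send2slurm(\%job);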

Warning email: By default, if an empty HASH is passed to the function,
a no-command sbatch script is launched
with the _--mail-type=END_ option. The intention is that this could be used to
warn at the end of any launched swarm. Also, by changing **mailtype** but
omitting the **command** value you can force the function to execute
an empty sbatch job with whatever warning behavior you choose.

Dependencies: if needed, the dependencies of a job should be passed to
the function as the full string that SLURM expects. That is, you can pass something
like _singleton_ or _afterok:jobid1:jobid2_.
This last one can be built, for example, by storing every previous jobid into an ARRAY
and passing them then as,

...
my $jobid = send2slurm(\%previous);
push @jobids, $jobid;
...
$task{'dependency'} = 'afterok:'.join(':',@jobids);
...
send2slurm(\%task);

Of course, if dependencies are not going to be used, the
**dependency** option can be safely ignored. But notice that, if you are
reusing a HASH, then this key should be deleted from it.
</code>

<code markdown>
# XNATACE

- xconf

Publish the path of the xnatapic configuration file

usage:

$path = xconf();

- xget\_conf

Get the XNAT connection data into a HASH

usage:

%xnat_data = xget_conf()

- xget\_pet

Get the XNAT PET experiment ID

usage:

xget_pet(host, ...);

- xget\_mri

Get the XNAT MRI experiment ID

usage:

xget_mri(host, ...);

- xget\_fs\_data

Get the full Freesurfer directory in a tar.gz file

usage:

xget_fs_data(host, ...);

- xget\_fs\_stats

Get a single stats file from the Freesurfer segmentation

usage:

xget_fs_stats(host, ...);

- xget\_session

Create a new JSESSIONID on XNAT. Returns the connection data
for the server AND the ID of the created session

usage:

xget_session();

- xput\_report

Upload a pdf report to XNAT

usage:

xput_report(host, ...);

- xput\_rvr

Upload a JSON file with VR data

usage:

xput_rvr(host, ...);

- xget\_rvr

Get VR results into a HASH. The output is a hash with the filenames and URI of each element stored at RVR

usage:

xget_rvr(host, ...);

- xget\_rvr\_data

Get RVR JSON data into a hash

usage:

xget_rvr_data(host, ...);

- xget\_subjects

Get the list of subjects of a project into a HASH.
The returned HASH, _%sbjs_, is built as _{ XNAT\_ID => Label }_

usage:

%sbjs = xget_subjects(host, ...);
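
Since the returned HASH maps XNAT_ID to Label, it can be walked as
follows (the arguments beyond host are elided above, so the ones shown
here are assumptions):

my %sbjs = xget_subjects($host, $jsession, $project);   # argument list assumed
foreach my $xid (sort keys %sbjs) {
    print "$xid -> $sbjs{$xid}\n";
}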

- xget\_pet\_reg

Download the PET registered into native space in nifti format

usage:

xget_pet_reg(host, ...);

- xget\_pet\_data

Get the PET FBB analysis results into a HASH

usage:

%xresult = xget_pet_data(host, ...);

- xget\_exp\_data

Get a data field of an experiment.
The desired field should be indicated as input.
For example, if you want the date of the experiment, it is
sought as

my $xdate = xget_exp_data($host, ...);

There are some common fields such as _date_, _label_ or _dcmPatientId_
but in general you should do something like

curl -X GET -b JSESSIONID=00000blahblah "..."

in order to know the available fields

usage:

$xdata = xget_exp_data(host, ...);

- xget\_sbj\_data

Get the subject metadata. Not very
interesting except to extract
the subject label.

usage:

$xdata = xget_sbj_data(host, ...);

</code>
====== Dependencies ======

<code>
Data::Dump
File::Slurp
File::
File::Temp
File::
File::Copy
File::
File::
Cwd
Spreadsheet::
Text::CSV
File::Path
MIME::Lite
JSON
</code>