#!/bin/tcsh -xef

echo "auto-generated by afni_proc.py, Wed Nov 28 08:49:44 2018"
echo "(version 5.18, September 12, 2017)"
echo "execution started: `date`"

# Single-subject SID-task fMRI preprocessing + regression pipeline (AFNI).
# Hand-edited from afni_proc.py output (see command record at end of file).
#
# execute via :
#      tcsh -xef proc_${subj} |& tee output.proc_${subj}

# =========================== auto block: setup ============================
# script setup

# take note of the AFNI version
afni -ver

# check that the current AFNI version is recent enough
afni_history -check_date 23 Sep 2016
if ( $status ) then
    echo "** this script requires newer AFNI binaries (than 23 Sep 2016)"
    echo "   (consider: @update.afni.binaries -defaults)"
    exit
endif

# the user may specify a single subject to run with
# (required: the script exits if no subject ID is given on the command line)
if ( $#argv > 0 ) then
    set subj=$1
    set af=`which afni`
    set apath=`dirname $af`
    set stpath=/data/taylorc_group/data/R61/
    set spath=/data/taylorc_group/data/R61/${subj}/
    cd $stpath
else
    exit
endif

# assign output directory name
set output_dir = ${subj}.SIDresults

# verify that the results directory does not yet exist
# (fixed: message previously printed "$subj.results", which is not the
#  directory actually being checked)
if ( -d $output_dir ) then
    echo output dir "$output_dir" already exists
    exit
endif

# set list of runs (two runs: 01 02)
set runs = (`count -digits 2 1 2`)

# create results and stimuli directories
mkdir $output_dir
mkdir $output_dir/stimuli

# copy stim files into stimulus directory
cp \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_1.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_2.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_3.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_4.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_5.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Anticip_6.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_10.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_11.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_1.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_12.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_2.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_3.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_4.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_5.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_6.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_7.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_8.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_Consump_9.1D \
    /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_SIDclip_cat_rating.1D \
    $output_dir/stimuli

# copy anatomy to results dir
3dcopy ${spath}anatSS.${subj}.nii $output_dir/anatSS.${subj}

# copy external -tlrc_NL_warped_dsets datasets
3dcopy ${spath}anatQQ.${subj}.nii $output_dir/anatQQ.${subj}
3dcopy ${spath}anatQQ.${subj}.aff12.1D $output_dir/anatQQ.aff12.1D
# fixed: copy the warp to the .nii.gz name, since every later -nwarp
# reference (and the tlrc block comment) uses anatQQ.${subj}_WARP.nii.gz
3dcopy ${spath}anatQQ.${subj}_WARP.nii $output_dir/anatQQ.${subj}_WARP.nii.gz

# ============================ auto block: tcat ============================
# apply 3dTcat to copy input dsets to results dir, while
# removing the first 2 TRs
3dTcat -prefix $output_dir/pb00.$subj.r01.tcat \
    ${spath}${subj}SIDRun1_topup.nii.gz'[2..$]'
3dTcat -prefix $output_dir/pb00.$subj.r02.tcat \
    ${spath}${subj}SIDRun2_topup.nii.gz'[2..$]'

# and make note of repetitions (TRs) per run
# (queried from the data rather than hard-coded)
#set tr_counts = ( 273 273 )
set tr_counts = (`3dnvals $output_dir/pb00.$subj.r??.tcat+orig.HEAD` )

# -------------------------------------------------------
# enter the results directory (can begin processing data)
cd $output_dir

# ========================== auto block: outcount ==========================
# data check: compute outlier fraction for each volume
touch out.pre_ss_warn.txt
foreach run ( $runs )
    3dToutcount -automask -fraction -polort 4 -legendre \
                pb00.$subj.r$run.tcat+orig > outcount.r$run.1D

    # censor outlier TRs per run, ignoring the first 0 TRs
    # - censor when more than 0.1 of automask voxels are outliers
    # - step() defines which TRs to remove via censoring
    1deval -a outcount.r$run.1D -expr "1-step(a-0.1)" > rm.out.cen.r$run.1D

    # outliers at TR 0 might suggest pre-steady state TRs
    if ( `1deval -a outcount.r$run.1D"{0}" -expr "step(a-0.4)"` ) then
        echo "** TR #0 outliers: possible pre-steady state TRs in run $run" \
            >> out.pre_ss_warn.txt
    endif
end

# catenate outlier counts into a single time series
cat outcount.r*.1D > outcount_rall.1D

# catenate outlier censor files into a single time series
cat rm.out.cen.r*.1D > outcount_${subj}_censor.1D

# get run number and TR index for minimum outlier volume
set minindex = `3dTstat -argmin -prefix - outcount_rall.1D\'`
set ovals = ( `1d_tool.py -set_run_lengths $tr_counts \
                          -index_to_run_tr $minindex` )
# save run and TR indices for extraction of vr_base_min_outlier
set minoutrun = $ovals[1]
set minouttr = $ovals[2]
echo "min outlier: run $minoutrun, TR $minouttr" | tee out.min_outlier.txt

# ================================ despike =================================
# apply 3dDespike to each run
foreach run ( $runs )
    3dDespike -NEW -nomask -prefix pb01.$subj.r$run.despike \
        pb00.$subj.r$run.tcat+orig
end

# ================================= tshift =================================
# time shift data so all slice timing is the same
foreach run ( $runs )
    3dTshift -tpattern altplus -tzero 0 -quintic -prefix pb02.$subj.r$run.tshift \
             pb01.$subj.r$run.despike+orig
end

# --------------------------------
# extract volreg registration base
3dbucket -prefix vr_base_min_outlier \
    pb02.$subj.r$minoutrun.tshift+orig"[$minouttr]"

# ================================= align ==================================
# for e2a: compute anat alignment transformation to EPI registration base
# (new anat will be current anatSS.${subj}+orig)
align_epi_anat.py -anat2epi -anat anatSS.${subj}+orig \
       -suffix _al_junk \
       -epi vr_base_min_outlier+orig -epi_base 0 \
       -epi_strip 3dAutomask \
       -anat_has_skull no \
       -ginormous_move -deoblique on -cost lpc+ZZ \
       -volreg off -tshift off

# ================================== tlrc ==================================
# nothing to do: have external -tlrc_NL_warped_dsets
# warped anat     : anatQQ.${subj}+tlrc
# affine xform    : anatQQ.aff12.1D
# non-linear warp : anatQQ.${subj}_WARP.nii.gz

# ================================= volreg =================================
# align each dset to base volume, align to anat, warp to tlrc space

# verify that we have a +tlrc warp dataset
if ( ! -f anatQQ.${subj}+tlrc.HEAD ) then
    echo "** missing +tlrc warp dataset: anatQQ.${subj}+tlrc.HEAD"
    exit
endif

# register and warp
foreach run ( $runs )
    # register each volume to the base
    3dvolreg -verbose -zpad 1 -base vr_base_min_outlier+orig \
             -1Dfile dfile.r$run.1D -prefix rm.epi.volreg.r$run \
             -cubic \
             -1Dmatrix_save mat.r$run.vr.aff12.1D \
             pb02.$subj.r$run.tshift+orig

    # create an all-1 dataset to mask the extents of the warp
    3dcalc -overwrite -a pb02.$subj.r$run.tshift+orig -expr 1 \
           -prefix rm.epi.all1

    # catenate volreg/epi2anat/tlrc xforms
    cat_matvec -ONELINE \
               anatQQ.aff12.1D \
               anatSS.${subj}_al_junk_mat.aff12.1D -I \
               mat.r$run.vr.aff12.1D > mat.r$run.warp.aff12.1D

    # apply catenated xform: volreg/epi2anat/tlrc
    # then apply non-linear standard-space warp
    3dNwarpApply -master anatQQ.${subj}+tlrc -dxyz 3.5 \
                 -source pb02.$subj.r$run.tshift+orig \
                 -nwarp "anatQQ.${subj}_WARP.nii.gz mat.r$run.warp.aff12.1D" \
                 -prefix rm.epi.nomask.r$run

    # warp the all-1 dataset for extents masking
    3dNwarpApply -master anatQQ.${subj}+tlrc -dxyz 3.5 \
                 -source rm.epi.all1+orig \
                 -nwarp "anatQQ.${subj}_WARP.nii.gz mat.r$run.warp.aff12.1D" \
                 -interp cubic \
                 -ainterp NN -quiet \
                 -prefix rm.epi.1.r$run

    # make an extents intersection mask of this run
    3dTstat -min -prefix rm.epi.min.r$run rm.epi.1.r$run+tlrc
end

# make a single file of registration params
cat dfile.r*.1D > dfile_rall.1D

# ----------------------------------------
# create the extents mask: mask_epi_extents+tlrc
# (this is a mask of voxels that have valid data at every TR)
3dMean -datum short -prefix rm.epi.mean rm.epi.min.r*.HEAD
3dcalc -a rm.epi.mean+tlrc -expr 'step(a-0.999)' -prefix mask_epi_extents

# and apply the extents mask to the EPI data
# (delete any time series with missing data)
foreach run ( $runs )
    3dcalc -a rm.epi.nomask.r$run+tlrc -b mask_epi_extents+tlrc \
           -expr 'a*b' -prefix pb03.$subj.r$run.volreg
end

# warp the volreg base EPI dataset to make a final version
cat_matvec -ONELINE \
           anatQQ.aff12.1D \
           anatSS.${subj}_al_junk_mat.aff12.1D -I > mat.basewarp.aff12.1D

3dNwarpApply -master anatQQ.${subj}+tlrc -dxyz 3.5 \
             -source vr_base_min_outlier+orig \
             -nwarp "anatQQ.${subj}_WARP.nii.gz mat.basewarp.aff12.1D" \
             -prefix final_epi_vr_base_min_outlier

# create an anat_final dataset, aligned with stats
3dcopy anatQQ.${subj}+tlrc anat_final.$subj

# record final registration costs
3dAllineate -base final_epi_vr_base_min_outlier+tlrc -allcostX \
            -input anat_final.$subj+tlrc |& tee out.allcostX.txt

# ================================== blur ==================================
# blur each volume of each run
foreach run ( $runs )
    3dBlurInMask -preserve -FWHM 6.0 -automask \
                 -prefix pb04.$subj.r$run.blur \
                 pb03.$subj.r$run.volreg+tlrc
end

# ================================== mask ==================================
# create 'full_mask' dataset (union mask)
foreach run ( $runs )
    3dAutomask -dilate 1 -prefix rm.mask_r$run pb04.$subj.r$run.blur+tlrc
end

# create union of inputs, output type is byte
3dmask_tool -inputs rm.mask_r*+tlrc.HEAD -union -prefix full_mask.$subj

# ---- create subject anatomy mask, mask_anat.$subj+tlrc ----
#      (resampled from tlrc anat)
3dresample -master full_mask.$subj+tlrc -input anatQQ.${subj}+tlrc \
           -prefix rm.resam.anat

# convert to binary anat mask; fill gaps and holes
3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.anat+tlrc \
            -prefix mask_anat.$subj

# compute overlaps between anat and EPI masks
3dABoverlap -no_automask full_mask.$subj+tlrc mask_anat.$subj+tlrc \
            |& tee out.mask_ae_overlap.txt

# note Dice coefficient of masks, as well
3ddot -dodice full_mask.$subj+tlrc mask_anat.$subj+tlrc \
      |& tee out.mask_ae_dice.txt

# ---- create group anatomy mask, mask_group+tlrc ----
#      (resampled from tlrc base anat, MNI152_2009_template.nii.gz)
3dresample -master full_mask.$subj+tlrc -prefix ./rm.resam.group \
           -input /software/AFNI_17.2.17/abin/MNI152_2009_template.nii.gz

# convert to binary group mask; fill gaps and holes
3dmask_tool -dilate_input 5 -5 -fill_holes -input rm.resam.group+tlrc \
            -prefix mask_group

# ================================= scale ==================================
# scale each voxel time series to have a mean of 100
# (be sure no negatives creep in)
# (subject to a range of [0,200])
foreach run ( $runs )
    3dTstat -prefix rm.mean_r$run pb04.$subj.r$run.blur+tlrc
    3dcalc -a pb04.$subj.r$run.blur+tlrc -b rm.mean_r$run+tlrc \
           -c mask_epi_extents+tlrc \
           -expr 'c * min(200, a/b*100)*step(a)*step(b)' \
           -prefix pb05.$subj.r$run.scale
end

# ================================ regress =================================

# these broke when the length of the summed motion was an odd value
# to fix this I ran them separate then combined them

# compute de-meaned motion parameters (for use in regression)
# (fixed: dfile.r02.1D is a single run, so it takes -set_nruns 1 just like
#  dfile.r01.1D and all the per-run calls below; -set_nruns 2 would demean
#  each half of the run separately, or fail outright on odd run lengths)
#1d_tool.py -infile dfile_rall.1D -set_nruns 2 -demean -write motion_demean.1D
1d_tool.py -infile dfile.r01.1D -set_nruns 1 -demean -write motion_demean_r01.1D
1d_tool.py -infile dfile.r02.1D -set_nruns 1 -demean -write motion_demean_r02.1D
cat motion_demean_r01.1D motion_demean_r02.1D > motion_demean.1D

# compute motion parameter derivatives (for use in regression)
#1d_tool.py -infile dfile_rall.1D -set_nruns 2 -derivative -demean -write motion_deriv.1D
1d_tool.py -infile dfile.r01.1D -set_nruns 1 -derivative -demean -write motion_deriv_r01.1D
1d_tool.py -infile dfile.r02.1D -set_nruns 1 -derivative -demean -write motion_deriv_r02.1D
cat motion_deriv_r01.1D motion_deriv_r02.1D > motion_deriv.1D

# this won't run on odd or uneven length run files
# convert motion parameters for per-run regression
# (awk emulation of -split_into_pad_runs: keep one run's rows, pad the
#  other run's rows with zeros; FNR==NR selects the first input file)
#1d_tool.py -infile motion_demean.1D -set_nruns 2 -split_into_pad_runs mot_demean
awk '{if(FNR==NR){print $0}else{print "0 0 0 0 0 0"}}' motion_demean_r01.1D motion_demean_r02.1D > mot_demean.r01.1D
awk '{if(FNR!=NR){print $0}else{print "0 0 0 0 0 0"}}' motion_demean_r01.1D motion_demean_r02.1D > mot_demean.r02.1D

#1d_tool.py -infile motion_deriv.1D -set_nruns 2 -split_into_pad_runs mot_deriv
awk '{if(FNR==NR){print $0}else{print "0 0 0 0 0 0"}}' motion_deriv_r01.1D motion_deriv_r02.1D > mot_deriv.r01.1D
awk '{if(FNR!=NR){print $0}else{print "0 0 0 0 0 0"}}' motion_deriv_r01.1D motion_deriv_r02.1D > mot_deriv.r02.1D

# create censor file motion_${subj}_censor.1D, for censoring motion
#1d_tool.py -infile dfile_rall.1D -set_nruns 2 -show_censor_count -censor_prev_TR -censor_motion 0.3 motion_${subj}
1d_tool.py -infile dfile.r01.1D -set_nruns 1 -show_censor_count -censor_prev_TR -censor_motion 0.3 motion_${subj}_r01
1d_tool.py -infile dfile.r02.1D -set_nruns 1 -show_censor_count -censor_prev_TR -censor_motion 0.3 motion_${subj}_r02
cat motion_${subj}_r01_enorm.1D motion_${subj}_r02_enorm.1D > motion_${subj}_enorm.1D
cat motion_${subj}_r01_censor.1D motion_${subj}_r02_censor.1D > motion_${subj}_censor.1D

# combine multiple censor files
1deval -a motion_${subj}_censor.1D -b outcount_${subj}_censor.1D \
       -expr "a*b" > censor_${subj}_combined_2.1D

# note TRs that were not censored
set ktrs = `1d_tool.py -infile censor_${subj}_combined_2.1D \
                       -show_trs_uncensored encoded`

# ------------------------------
# run the regression analysis
# stim file -> condition mapping:
# stimuli/${subj}_SIDclip_cat_Anticip_1.1D   Reward Neutral
# stimuli/${subj}_SIDclip_cat_Anticip_2.1D   Reward Low
# stimuli/${subj}_SIDclip_cat_Anticip_3.1D   Reward High
# stimuli/${subj}_SIDclip_cat_Anticip_4.1D   Punishment Neutral
# stimuli/${subj}_SIDclip_cat_Anticip_5.1D   Punishment Low
# stimuli/${subj}_SIDclip_cat_Anticip_6.1D   Punishment High
# stimuli/${subj}_SIDclip_cat_Consump_1.1D   Reward Neutral Hit
# stimuli/${subj}_SIDclip_cat_Consump_2.1D   Reward Low Hit
# stimuli/${subj}_SIDclip_cat_Consump_3.1D   Reward High Hit
# stimuli/${subj}_SIDclip_cat_Consump_4.1D   Punishment Neutral Hit
# stimuli/${subj}_SIDclip_cat_Consump_5.1D   Punishment Low Hit
# stimuli/${subj}_SIDclip_cat_Consump_6.1D   Punishment High Hit
# stimuli/${subj}_SIDclip_cat_Consump_7.1D   Reward Neutral Miss
# stimuli/${subj}_SIDclip_cat_Consump_8.1D   Reward Low Miss
# stimuli/${subj}_SIDclip_cat_Consump_9.1D   Reward High Miss
# stimuli/${subj}_SIDclip_cat_Consump_10.1D  Punishment Neutral Miss
# stimuli/${subj}_SIDclip_cat_Consump_11.1D  Punishment Low Miss
# stimuli/${subj}_SIDclip_cat_Consump_12.1D  Punishment High Miss
# stimuli/${subj}_SIDclip_cat_rating.1D      Rating
3dDeconvolve -input pb05.$subj.r*.scale+tlrc.HEAD \
    -censor censor_${subj}_combined_2.1D \
    -polort 4 \
    -num_stimts 43 \
    -stim_times_AM1 1 stimuli/${subj}_SIDclip_cat_Anticip_1.1D 'dmBLOCK' \
    -stim_label 1 AntRwdNone \
    -stim_times_AM1 2 stimuli/${subj}_SIDclip_cat_Anticip_2.1D 'dmBLOCK' \
    -stim_label 2 AntRwdLow \
    -stim_times_AM1 3 stimuli/${subj}_SIDclip_cat_Anticip_3.1D 'dmBLOCK' \
    -stim_label 3 AntRwdHigh \
    -stim_times_AM1 4 stimuli/${subj}_SIDclip_cat_Anticip_4.1D 'dmBLOCK' \
    -stim_label 4 AntPunNone \
    -stim_times_AM1 5 stimuli/${subj}_SIDclip_cat_Anticip_5.1D 'dmBLOCK' \
    -stim_label 5 AntPunLow \
    -stim_times_AM1 6 stimuli/${subj}_SIDclip_cat_Anticip_6.1D 'dmBLOCK' \
    -stim_label 6 AntPunHigh \
    -stim_times_AM1 7 stimuli/${subj}_SIDclip_cat_Consump_1.1D 'dmBLOCK' \
    -stim_label 7 RwdNoneHit \
    -stim_times_AM1 8 stimuli/${subj}_SIDclip_cat_Consump_2.1D 'dmBLOCK' \
    -stim_label 8 RwdLowHit \
    -stim_times_AM1 9 stimuli/${subj}_SIDclip_cat_Consump_3.1D 'dmBLOCK' \
    -stim_label 9 RwdHighHit \
    -stim_times_AM1 10 stimuli/${subj}_SIDclip_cat_Consump_4.1D 'dmBLOCK' \
    -stim_label 10 PunNoneHit \
    -stim_times_AM1 11 stimuli/${subj}_SIDclip_cat_Consump_5.1D 'dmBLOCK' \
    -stim_label 11 PunLowHit \
    -stim_times_AM1 12 stimuli/${subj}_SIDclip_cat_Consump_6.1D 'dmBLOCK' \
    -stim_label 12 PunHighHit \
    -stim_times_AM1 13 stimuli/${subj}_SIDclip_cat_Consump_7.1D 'dmBLOCK' \
    -stim_label 13 RwdNoneMiss \
    -stim_times_AM1 14 stimuli/${subj}_SIDclip_cat_Consump_8.1D 'dmBLOCK' \
    -stim_label 14 RwdLowMiss \
    -stim_times_AM1 15 stimuli/${subj}_SIDclip_cat_Consump_9.1D 'dmBLOCK' \
    -stim_label 15 RwdHighMiss \
    -stim_times_AM1 16 stimuli/${subj}_SIDclip_cat_Consump_10.1D 'dmBLOCK' \
    -stim_label 16 PunNoneMiss \
    -stim_times_AM1 17 stimuli/${subj}_SIDclip_cat_Consump_11.1D 'dmBLOCK' \
    -stim_label 17 PunLowMiss \
    -stim_times_AM1 18 stimuli/${subj}_SIDclip_cat_Consump_12.1D 'dmBLOCK' \
    -stim_label 18 PunHighMiss \
    -stim_times_AM1 19 stimuli/${subj}_SIDclip_cat_rating.1D 'dmBLOCK' \
    -stim_label 19 Rating \
    -stim_file 20 mot_demean.r01.1D'[0]' -stim_base 20 -stim_label 20 roll_01 \
    -stim_file 21 mot_demean.r01.1D'[1]' -stim_base 21 -stim_label 21 pitch_01 \
    -stim_file 22 mot_demean.r01.1D'[2]' -stim_base 22 -stim_label 22 yaw_01 \
    -stim_file 23 mot_demean.r01.1D'[3]' -stim_base 23 -stim_label 23 dS_01 \
    -stim_file 24 mot_demean.r01.1D'[4]' -stim_base 24 -stim_label 24 dL_01 \
    -stim_file 25 mot_demean.r01.1D'[5]' -stim_base 25 -stim_label 25 dP_01 \
    -stim_file 26 mot_demean.r02.1D'[0]' -stim_base 26 -stim_label 26 roll_02 \
    -stim_file 27 mot_demean.r02.1D'[1]' -stim_base 27 -stim_label 27 pitch_02 \
    -stim_file 28 mot_demean.r02.1D'[2]' -stim_base 28 -stim_label 28 yaw_02 \
    -stim_file 29 mot_demean.r02.1D'[3]' -stim_base 29 -stim_label 29 dS_02 \
    -stim_file 30 mot_demean.r02.1D'[4]' -stim_base 30 -stim_label 30 dL_02 \
    -stim_file 31 mot_demean.r02.1D'[5]' -stim_base 31 -stim_label 31 dP_02 \
    -stim_file 32 mot_deriv.r01.1D'[0]' -stim_base 32 -stim_label 32 roll_03 \
    -stim_file 33 mot_deriv.r01.1D'[1]' -stim_base 33 -stim_label 33 pitch_03 \
    -stim_file 34 mot_deriv.r01.1D'[2]' -stim_base 34 -stim_label 34 yaw_03 \
    -stim_file 35 mot_deriv.r01.1D'[3]' -stim_base 35 -stim_label 35 dS_03 \
    -stim_file 36 mot_deriv.r01.1D'[4]' -stim_base 36 -stim_label 36 dL_03 \
    -stim_file 37 mot_deriv.r01.1D'[5]' -stim_base 37 -stim_label 37 dP_03 \
    -stim_file 38 mot_deriv.r02.1D'[0]' -stim_base 38 -stim_label 38 roll_04 \
    -stim_file 39 mot_deriv.r02.1D'[1]' -stim_base 39 -stim_label 39 pitch_04 \
    -stim_file 40 mot_deriv.r02.1D'[2]' -stim_base 40 -stim_label 40 yaw_04 \
    -stim_file 41 mot_deriv.r02.1D'[3]' -stim_base 41 -stim_label 41 dS_04 \
    -stim_file 42 mot_deriv.r02.1D'[4]' -stim_base 42 -stim_label 42 dL_04 \
    -stim_file 43 mot_deriv.r02.1D'[5]' -stim_base 43 -stim_label 43 dP_04 \
    -jobs 4 -num_glt 20 \
    -gltsym 'SYM: +1.0*AntRwdLow -1.0*AntRwdNone' \
    -glt_label 1 AnticipLowGain-NoGain \
    -gltsym 'SYM: +1.0*AntRwdHigh -1.0*AntRwdNone' \
    -glt_label 2 AnticipHighGain-NoGain \
    -gltsym 'SYM: +1.0*RwdLowHit -1.0*RwdNoneHit' \
    -glt_label 3 OutcomeLowGainHit-NoGainHit \
    -gltsym 'SYM: +1.0*RwdHighHit -1.0*RwdNoneHit' \
    -glt_label 4 OutcomeHighGainHit-NoGainHit \
    -gltsym 'SYM: +1.0*AntPunLow -1.0*AntPunNone' \
    -glt_label 5 AnticipLowLoss-NoLoss \
    -gltsym 'SYM: +1.0*AntPunHigh -1.0*AntPunNone' \
    -glt_label 6 AnticipHighLoss-NoLoss \
    -gltsym 'SYM: +1.0*PunLowMiss -1.0*PunNoneMiss' \
    -glt_label 7 OutcomeLowLossMiss-NoLossMiss \
    -gltsym 'SYM: +1.0*PunHighMiss -1.0*PunNoneMiss' \
    -glt_label 8 OutcomeHighLossMiss-NoLossMiss \
    -gltsym 'SYM: +0.5*AntRwdLow +0.5*AntRwdHigh -1.0*AntRwdNone' \
    -glt_label 9 AnticipGain-NoGain \
    -gltsym 'SYM: +0.5*AntPunLow +0.5*AntPunHigh -1.0*AntPunNone' \
    -glt_label 10 AnticipLoss-NoLoss \
    -gltsym 'SYM: +0.5*RwdLowHit +0.5*RwdHighHit -1.0*RwdNoneHit' \
    -glt_label 11 OutcomeGainHit-NoGainHit \
    -gltsym 'SYM: +0.5*PunLowMiss +0.5*PunHighMiss -1.0*PunNoneMiss' \
    -glt_label 12 OutcomeLossMiss-NoLossMiss \
    -gltsym 'SYM: +0.5*RwdLowMiss +0.5*RwdHighMiss -1.0*RwdNoneMiss' \
    -glt_label 13 OutcomeGainMiss-NoGainMiss \
    -gltsym 'SYM: +0.5*PunLowHit +0.5*PunHighHit -1.0*PunNoneHit' \
    -glt_label 14 OutcomeLossHit-NoLossHit \
    -gltsym 'SYM: +0.5*RwdLowHit +0.5*RwdHighHit -0.5*RwdLowMiss -0.5*RwdHighMiss' \
    -glt_label 15 OutcomeGainHit-Miss \
    -gltsym 'SYM: +0.5*PunLowHit +0.5*PunHighHit -0.5*PunLowMiss -0.5*PunHighMiss' \
    -glt_label 16 OutcomeLossHit-Miss \
    -gltsym 'SYM: +0.5*AntRwdLow +0.5*AntRwdHigh' \
    -glt_label 17 AnticipAnyGain \
    -gltsym 'SYM: +0.5*AntPunLow +0.5*AntPunHigh' \
    -glt_label 18 AnticipAnyLoss \
    -gltsym 'SYM: +0.5*RwdLowHit +0.5*RwdHighHit' \
    -glt_label 19 OutcomeAnyGainHit \
    -gltsym 'SYM: +0.5*PunLowMiss +0.5*PunHighMiss' \
    -glt_label 20 OutcomeAnyLossMiss \
    -fout -tout -x1D X.xmat.1D -xjpeg X.jpg \
    -x1D_uncensored X.nocensor.xmat.1D \
    -errts errts.${subj} \
    -bucket stats.$subj

# if 3dDeconvolve fails, terminate the script
if ( $status != 0 ) then
    echo '---------------------------------------'
    echo '** 3dDeconvolve error, failing...'
    echo '   (consider the file 3dDeconvolve.err)'
    exit
endif

# display any large pairwise correlations from the X-matrix
1d_tool.py -show_cormat_warnings -infile X.xmat.1D |& tee out.cormat_warn.txt

# -- execute the 3dREMLfit script, written by 3dDeconvolve --
tcsh -x stats.REML_cmd

# if 3dREMLfit fails, terminate the script
if ( $status != 0 ) then
    echo '---------------------------------------'
    echo '** 3dREMLfit error, failing...'
    exit
endif

# create an all_runs dataset to match the fitts, errts, etc.
3dTcat -prefix all_runs.$subj pb05.$subj.r*.scale+tlrc.HEAD

# --------------------------------------------------
# create a temporal signal to noise ratio dataset
#    signal: if 'scale' block, mean should be 100
#    noise : compute standard deviation of errts
3dTstat -mean -prefix rm.signal.all all_runs.$subj+tlrc"[$ktrs]"
3dTstat -stdev -prefix rm.noise.all errts.${subj}_REML+tlrc"[$ktrs]"
3dcalc -a rm.signal.all+tlrc \
       -b rm.noise.all+tlrc \
       -c full_mask.$subj+tlrc \
       -expr 'c*a/b' -prefix TSNR.$subj

# ---------------------------------------------------
# compute and store GCOR (global correlation average)
# (sum of squares of global mean of unit errts)
3dTnorm -norm2 -prefix rm.errts.unit errts.${subj}_REML+tlrc
3dmaskave -quiet -mask full_mask.$subj+tlrc rm.errts.unit+tlrc \
          > gmean.errts.unit.1D
3dTstat -sos -prefix - gmean.errts.unit.1D\' > out.gcor.1D
echo "-- GCOR = `cat out.gcor.1D`"

# ---------------------------------------------------
# compute correlation volume
# (per voxel: average correlation across masked brain)
# (now just dot product with average unit time series)
3dcalc -a rm.errts.unit+tlrc -b gmean.errts.unit.1D -expr 'a*b' -prefix rm.DP
3dTstat -sum -prefix corr_brain rm.DP+tlrc

# create fitts dataset from all_runs and errts
3dcalc -a all_runs.$subj+tlrc -b errts.${subj}+tlrc -expr a-b \
       -prefix fitts.$subj

# create fitts from REML errts
3dcalc -a all_runs.$subj+tlrc -b errts.${subj}_REML+tlrc -expr a-b \
       -prefix fitts.$subj\_REML

# create ideal files for fixed response stim types
# NOTE(review): left commented out by the author; the column indices below
# come from the original GAM-based design and do not match the current
# dmBLOCK/AM1 X-matrix — verify indices before re-enabling
#1dcat X.nocensor.xmat.1D'[10]' > ideal_antnoloss.1D
#1dcat X.nocensor.xmat.1D'[11]' > ideal_antlowloss.1D
#1dcat X.nocensor.xmat.1D'[12]' > ideal_anthighloss.1D
#1dcat X.nocensor.xmat.1D'[13]' > ideal_antnogain.1D
#1dcat X.nocensor.xmat.1D'[14]' > ideal_antlowgain.1D
#1dcat X.nocensor.xmat.1D'[15]' > ideal_anthighgain.1D
#1dcat X.nocensor.xmat.1D'[16]' > ideal_consumphitnoloss.1D
#1dcat X.nocensor.xmat.1D'[17]' > ideal_consumphitlowloss.1D
#1dcat X.nocensor.xmat.1D'[18]' > ideal_consumphithighloss.1D
#1dcat X.nocensor.xmat.1D'[19]' > ideal_consumphitnogain.1D
#1dcat X.nocensor.xmat.1D'[20]' > ideal_consumphitlowgain.1D
#1dcat X.nocensor.xmat.1D'[21]' > ideal_consumphithighgain.1D
#1dcat X.nocensor.xmat.1D'[22]' > ideal_consumpmissnoloss.1D
#1dcat X.nocensor.xmat.1D'[23]' > ideal_consumpmisslowloss.1D
#1dcat X.nocensor.xmat.1D'[24]' > ideal_consumpmisshighloss.1D
#1dcat X.nocensor.xmat.1D'[25]' > ideal_consumpmissnogain.1D
#1dcat X.nocensor.xmat.1D'[26]' > ideal_consumpmisslowgain.1D
#1dcat X.nocensor.xmat.1D'[27]' > ideal_consumpmisshighgain.1D

# --------------------------------------------------------
# compute sum of non-baseline regressors from the X-matrix
# (use 1d_tool.py to get list of regressor colums)
set reg_cols = `1d_tool.py -infile X.nocensor.xmat.1D -show_indices_interest`
3dTstat -sum -prefix sum_ideal.1D X.nocensor.xmat.1D"[$reg_cols]"

# also, create a stimulus-only X-matrix, for easy review
1dcat X.nocensor.xmat.1D"[$reg_cols]" > X.stim.xmat.1D

# ============================ blur estimation =============================
# compute blur estimates
touch blur_est.$subj.1D   # start with empty file

# create directory for ACF curve files
mkdir files_ACF

# -- estimate blur for each run in epits --
touch blur.epits.1D

# restrict to uncensored TRs, per run
foreach run ( $runs )
    set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \
                          -show_trs_run $run`
    if ( $trs == "" ) continue
    3dFWHMx -detrend -mask full_mask.$subj+tlrc \
            -ACF files_ACF/out.3dFWHMx.ACF.epits.r$run.1D \
            all_runs.$subj+tlrc"[$trs]" >> blur.epits.1D
end

# compute average FWHM blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{0..$(2)}'\'` )
echo average epits FWHM blurs: $blurs
echo "$blurs # epits FWHM blur estimates" >> blur_est.$subj.1D

# compute average ACF blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.epits.1D'{1..$(2)}'\'` )
echo average epits ACF blurs: $blurs
echo "$blurs # epits ACF blur estimates" >> blur_est.$subj.1D

# -- estimate blur for each run in errts --
touch blur.errts.1D

# restrict to uncensored TRs, per run
foreach run ( $runs )
    set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \
                          -show_trs_run $run`
    if ( $trs == "" ) continue
    3dFWHMx -detrend -mask full_mask.$subj+tlrc \
            -ACF files_ACF/out.3dFWHMx.ACF.errts.r$run.1D \
            errts.${subj}+tlrc"[$trs]" >> blur.errts.1D
end

# compute average FWHM blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{0..$(2)}'\'` )
echo average errts FWHM blurs: $blurs
echo "$blurs # errts FWHM blur estimates" >> blur_est.$subj.1D

# compute average ACF blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.errts.1D'{1..$(2)}'\'` )
echo average errts ACF blurs: $blurs
echo "$blurs # errts ACF blur estimates" >> blur_est.$subj.1D

# -- estimate blur for each run in err_reml --
touch blur.err_reml.1D

# restrict to uncensored TRs, per run
foreach run ( $runs )
    set trs = `1d_tool.py -infile X.xmat.1D -show_trs_uncensored encoded \
                          -show_trs_run $run`
    if ( $trs == "" ) continue
    3dFWHMx -detrend -mask full_mask.$subj+tlrc \
            -ACF files_ACF/out.3dFWHMx.ACF.err_reml.r$run.1D \
            errts.${subj}_REML+tlrc"[$trs]" >> blur.err_reml.1D
end

# compute average FWHM blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{0..$(2)}'\'` )
echo average err_reml FWHM blurs: $blurs
echo "$blurs # err_reml FWHM blur estimates" >> blur_est.$subj.1D

# compute average ACF blur (from every other row) and append
set blurs = ( `3dTstat -mean -prefix - blur.err_reml.1D'{1..$(2)}'\'` )
echo average err_reml ACF blurs: $blurs
echo "$blurs # err_reml ACF blur estimates" >> blur_est.$subj.1D

# add 3dClustSim results as attributes to any stats dset
mkdir files_ClustSim

# run Monte Carlo simulations using method 'ACF'
set params = ( `grep ACF blur_est.$subj.1D | tail -n 1` )
3dClustSim -both -mask full_mask.$subj+tlrc -acf $params[1-3] \
           -cmd 3dClustSim.ACF.cmd -prefix files_ClustSim/ClustSim.ACF

# run 3drefit to attach 3dClustSim results to stats
set cmd = ( `cat 3dClustSim.ACF.cmd` )
$cmd stats.$subj+tlrc stats.${subj}_REML+tlrc

# ================== auto block: generate review scripts ===================

# generate a review script for the unprocessed EPI data
gen_epi_review.py -script @epi_review.$subj \
    -dsets pb00.$subj.r*.tcat+orig.HEAD

# generate scripts to review single subject results
# (try with defaults, but do not allow bad exit status)
gen_ss_review_scripts.py -mot_limit 0.3 -out_limit 0.1 -exit0

# ========================== auto block: finalize ==========================

# remove temporary files
\rm -f rm.*

# if the basic subject review script is here, run it
# (want this to be the last text output)
if ( -e @ss_review_basic ) ./@ss_review_basic |& tee out.ss_review.$subj.txt

# return to parent directory
cd ..
echo "execution finished: `date`" # ========================================================================== # script generated by the command: # # afni_proc.py -subj_id ${subj} -script proc_${subj} -scr_overwrite \ # -blocks despike tshift align tlrc volreg blur mask scale regress \ # -tcat_remove_first_trs 2 -dsets \ # /data/taylorc_group/data/R61/${subj}/${subj}MIDRun1_topup.nii.gz \ # /data/taylorc_group/data/R61/${subj}/${subj}MIDRun2_topup.nii.gz \ # -blur_in_automask -blur_size 6.0 -copy_anat \ # /data/taylorc_group/data/R61/${subj}/anatSS.${subj}.nii.gz \ # -anat_has_skull no -align_opts_aea -ginormous_move -deoblique on -cost \ # lpc+ZZ -volreg_align_to MIN_OUTLIER -volreg_align_e2a -volreg_tlrc_warp \ # -tlrc_base /software/AFNI_17.2.17/abin/MNI152_2009_template.nii.gz \ # -tlrc_NL_warp -tlrc_NL_warped_dsets anatQQ.${subj}.nii.gz \ # anatQQ.aff12.1D anatQQ.${subj}_WARP.nii.gz -regress_stim_times \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss0_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss1_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss5_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain0_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain1_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain5_Cue.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss0_Outcome_Hit.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss1_Outcome_Hit.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss5_Outcome_Hit.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain0_Outcome_Hit.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain1_Outcome_Hit.1D \ # 
/data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain5_Outcome_Hit.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss0_Outcome_Miss.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss1_Outcome_Miss.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Loss5_Outcome_Miss.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain0_Outcome_Miss.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain1_Outcome_Miss.1D \ # /data/taylorc_group/data/R61/behavior/SID/ClipCat/${subj}_MID_clipcat_Gain5_Outcome_Miss.1D \ # -test_stim_files no -regress_stim_labels antnoloss antlowloss \ # anthighloss antnogain antlowgain anthighgain consumphitnoloss \ # consumphitlowloss consumphithighloss consumphitnogain consumphitlowgain \ # consumphithighgain consumpmissnoloss consumpmisslowloss \ # consumpmisshighloss consumpmissnogain consumpmisslowgain \ # consumpmisshighgain -regress_basis GAM -regress_censor_motion 0.3 \ # -regress_motion_per_run -regress_apply_mot_types demean deriv \ # -regress_censor_outliers 0.1 -regress_opts_3dD -jobs 4 -gltsym 'SYM: \ # +1.0*antlowgain -1.0*antnogain' -glt_label 1 AnticipLowGain-NoGain \ # -gltsym 'SYM: +1.0*anthighgain -1.0*antnogain' -glt_label 2 \ # AnticipHighGain-NoGain -gltsym 'SYM: +1.0*consumphitlowgain \ # -1.0*consumphitnogain' -glt_label 3 ConsumpLowGainHit-NoGainHit -gltsym \ # 'SYM: +1.0*consumphithighgain -1.0*consumphitnogain' -glt_label 4 \ # ConsumpHighGainHit-NoGainHit -gltsym 'SYM: +1.0*antlowloss \ # -1.0*antnoloss' -glt_label 5 AnticipLowLoss-NoLoss -gltsym 'SYM: \ # +1.0*anthighloss -1.0*antnoloss' -glt_label 6 AnticipHighLoss-NoLoss \ # -gltsym 'SYM: +1.0*consumpmisslowloss -1.0*consumpmissnoloss' \ # -glt_label 7 ConsumpLowLossMiss-NoLossMiss -gltsym 'SYM: \ # +1.0*consumpmisshighloss -1.0*consumpmissnoloss' -glt_label 8 \ # ConsumpHighLossMiss-NoLossMiss -gltsym 'SYM: 
+0.5*antlowgain \ # +0.5*anthighgain -1.0*antnogain' -glt_label 9 AnticipGain-NoGain \ # -gltsym 'SYM: +0.5*antlowloss +0.5*anthighloss -1.0*antnoloss' \ # -glt_label 10 AnticipLoss-NoLoss -gltsym 'SYM: +0.5*consumphitlowgain \ # +0.5*consumphithighgain -1.0*consumphitnogain' -glt_label 11 \ # ConsumpGainHit-NoGainHit -gltsym 'SYM: +0.5*consumpmisslowloss \ # +0.5*consumpmisshighloss -1.0*consumpmissnoloss' -glt_label 12 \ # ConsumpLossMiss-NoLossMiss -gltsym 'SYM: +0.5*consumpmisslowgain \ # +0.5*consumpmisshighgain -1.0*consumpmissnogain' -glt_label 13 \ # ConsumpGainMiss-NoGainMiss -gltsym 'SYM: +0.5*consumphitlowloss \ # +0.5*consumphithighloss -1.0*consumphitnoloss' -glt_label 14 \ # ConsumpLossHit-NoLossHit -gltsym 'SYM: +0.5*consumphitlowgain \ # +0.5*consumphithighgain -0.5*consumpmisslowgain \ # -0.5*consumpmisshighgain' -glt_label 15 ConsumpGainHit-Miss -gltsym \ # 'SYM: +0.5*consumphitlowloss +0.5*consumphithighloss \ # -0.5*consumpmisslowloss -0.5*consumpmisshighloss' -glt_label 16 \ # ConsumpLossHit-Miss -gltsym 'SYM: +0.5*antlowgain +0.5*anthighgain' \ # -glt_label 17 AnticipAnyGain -gltsym 'SYM: +0.5*antlowloss \ # +0.5*anthighloss' -glt_label 18 AnticipAnyLoss -gltsym 'SYM: \ # +0.5*consumphitlowgain +0.5*consumphithighgain' -glt_label 19 \ # ConsumpAnyGainHit -gltsym 'SYM: +0.5*consumpmisslowloss \ # +0.5*consumpmisshighloss' -glt_label 20 ConsumpAnyLossMiss \ # -regress_compute_fitts -regress_reml_exec -regress_make_ideal_sum \ # sum_ideal.1D -regress_est_blur_epits -regress_est_blur_errts \ # -regress_run_clustsim yes