diff --git a/02_normalize_harmonize_proteomics.html b/02_normalize_harmonize_proteomics.html deleted file mode 100644 index 803d592..0000000 --- a/02_normalize_harmonize_proteomics.html +++ /dev/null @@ -1,841 +0,0 @@ - - - - - - - - - - - - - - - -Reformat and process proteomics - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - -
-

Normalize phospho-proteomics

-

We now have phospho-proteomics from two cohorts. Here I’m trying to collect data from both and normalize but am clearly missing something. I do the following:

-
  1. replace all zero values with NA to avoid skewing normalization
  2. remove any features that are absent from >50% of the samples
  3. take the log of the data, then take a modified z-score (see the sketch below)
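For reference, the “modified z score” used below is the median/MAD-based robust score. A minimal sketch matching the expression used in the code that follows (`intensity_mat` is a stand-in for the raw abundance matrix):

```r
# Modified (robust) z-score per sample column: median-center, scale by MAD.
# 0.6745 is the usual modified z-score constant; note that R's mad() already
# applies a 1.4826 normalizing constant by default.
mod_z <- function(x) 0.6745 * (x - median(x, na.rm = TRUE)) / mad(x, na.rm = TRUE)

# e.g. zmat <- apply(log2(intensity_mat), 2, mod_z)   # intensity_mat: features x samples
```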

Each dataset is processed individually, then combined at the end. There is a clear batch effect.

-
-

Cohort 1 phospho

-

We start with the cohort 1 phospho data here.

-
##cohort 1 phospho
-##first we read in file, and get site info
-phospho1<- read.table(syn$get('syn69963552')$path,sep='\t',fill=NA,header=T,quote='"') |>
-  subset(!is.na(`Gene.Names`)) |>
-  subset(Gene.Names!='') |>
-  mutate(lsite=tolower(Residue)) |>
-  tidyr::unite(c(Residue,Site,lsite),col='site',sep='') |>
-  tidyr::unite(c(`Gene.Names`,site),col='site',sep='-') |>
-  as.data.frame()
-
-phospho1[which(phospho1==0,arr.ind=TRUE)]<-NA
-
-pfnames1 <- data.frame(fname=colnames(phospho1)[5:ncol(phospho1)])|>
-  mutate(aliquot=sapply(fname,function(x) unlist(strsplit(x,split='_'))[8]))|>
-  mutate(aliquot=as.double(aliquot))|>
-  mutate(cohort=1)
- 
-##log transform, median transform
-pzeros<-which(apply(phospho1[,5:ncol(phospho1)],1,function(x)
-    length(which(is.na(x)))/length(x) < 0.5))
-
-pmat1<-apply(0.01+log2(phospho1[pzeros,5:ncol(phospho1)]),2,
-             function(x) {0.6745 *(x-median(x,na.rm=T))/mad(x,na.rm=T)}) |>
-  as.data.frame() |>
-  mutate(site=phospho1$site[pzeros])
-
-##move to long form, upload
-plong1<-pmat1|>
-  tidyr::pivot_longer(1:(ncol(pmat1)-1),names_to='fname',values_to='abundance')|>
-  left_join(pfnames1) |>
-  group_by(site,fname,aliquot,cohort) |>
-  summarize(meanAbundance=mean(abundance,na.rm=T))|>
-  subset(!is.na(meanAbundance))|>
-  left_join(meta)
-
## Joining with `by = join_by(fname)`
-## `summarise()` has grouped output by 'site', 'fname', 'aliquot'. You can
-## override using the `.groups` argument.
-## Joining with `by = join_by(aliquot, cohort)`
-
readr::write_csv(plong1,file='log2normMedCenteredPhospho.csv')
-syn$store(File('log2normMedCenteredPhospho.csv',parentId='syn70078365'))
-
## File(id='syn65598472', synapseStore=True, modifiedOn='2025-10-07T16:10:10.287Z', dataFileHandleId='163828412', versionNumber=51, name='log2normMedCenteredPhospho.csv', createdBy='1418096', parentId='syn70078365', path='log2normMedCenteredPhospho.csv', _file_handle={'id': '163828412', 'etag': 'c532fccc-538b-4345-85ee-78a55c4db07b', 'createdBy': '1418096', 'createdOn': '2025-10-02T01:31:03.000Z', 'modifiedOn': '2025-10-02T01:31:03.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': '479eaaac2d8f26d5ef94fbec8a7d4c6d', 'fileName': 'log2normMedCenteredPhospho.csv', 'storageLocationId': 1, 'contentSize': 11566930, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/aff2a055-c32f-4a5c-bc5b-421908f76e6b/log2normMedCenteredPhospho.csv', 'previewId': '163828414', 'isPreview': False, 'externalURL': None}, concreteType='org.sagebionetworks.repo.model.FileEntity', isLatestVersion=True, cacheDir='', etag='0fc57caf-2cbf-4e7e-88a1-d4fce206142e', files=['log2normMedCenteredPhospho.csv'], modifiedBy='1418096', versionLabel='51', createdOn='2025-03-20T19:21:02.864Z')
-

The file is uploaded to synapse.

-
-
-

Cohort 2 phospho

-

Now on October 7 we can process the second batch of phospho.

-
##cohort 2 phospho
-##1 read in data
-phospho2 <- read.table(syn$get('syn69947351')$path,sep='\t',fill=NA,header=T,quote='"') |>
-  subset(!is.na(`Gene.Names`)) |>
-  subset(Gene.Names!='') |>
-  mutate(lsite=tolower(Residue)) |>
-  tidyr::unite(c(Residue,Site,lsite),col='site',sep='') |>
-  tidyr::unite(c(`Gene.Names`,site),col='site',sep='-')
-
-
-phospho2[which(phospho2==0,arr.ind=TRUE)] <- NA
-
-pfnames2 <- data.frame(fname=colnames(phospho2)[5:ncol(phospho2)]) |>
-  mutate(aliquot=sapply(fname,function(x) unlist(strsplit(x,split='_'))[9])) |>
-  mutate(aliquot=as.double(aliquot))|>
-  mutate(cohort=2)
-
-##remove missingness 
-tm <- which(apply(phospho2[,5:ncol(phospho2)],1,function(x) length(which(is.na(x)))/length(x) < 0.5))
-
-##log2 adjusted z score
-pmat2<-apply(log2(0.01+phospho2[tm,5:ncol(phospho2)]),2,
-              function(x) {0.6745 *(x-median(x,na.rm=T))/mad(x,na.rm=T)}) |>
-  as.data.frame() |>
-  mutate(site=phospho2$site[tm])
-
-
-
-plong2<-pmat2|>
-  tidyr::pivot_longer(1:(ncol(pmat2)-1),names_to='fname',values_to='abundance') |>
-  left_join(pfnames2)|>
-  group_by(site,fname,aliquot,cohort) |>
-  summarize(meanAbundance=mean(abundance,na.rm=T)) |>
-  subset(!is.na(meanAbundance))|>
-  left_join(meta)
-
## Joining with `by = join_by(fname)`
-## `summarise()` has grouped output by 'site', 'fname', 'aliquot'. You can
-## override using the `.groups` argument.
-## Joining with `by = join_by(aliquot, cohort)`
-
##save to file
-readr::write_csv(plong2,file='log2normMedCenteredPhospho_cohort2.csv')
-syn$store(File('log2normMedCenteredPhospho_cohort2.csv',parentId='syn70078365'))
-
## File(synapseStore=True, id='syn70078413', path='log2normMedCenteredPhospho_cohort2.csv', createdOn='2025-10-07T15:56:04.262Z', createdBy='1418096', files=['log2normMedCenteredPhospho_cohort2.csv'], _file_handle={'id': '163828418', 'etag': '1d896b0a-e1eb-4083-a6e0-b3f266dc59ce', 'createdBy': '1418096', 'createdOn': '2025-10-02T01:31:22.000Z', 'modifiedOn': '2025-10-02T01:31:22.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': '84b9a91da42f0630ebbd37e0ad9b22a7', 'fileName': 'log2normMedCenteredPhospho_cohort2.csv', 'storageLocationId': 1, 'contentSize': 6535765, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/b3ab724b-02f7-4b08-8ea0-0191c2835ee6/log2normMedCenteredPhospho_cohort2.csv', 'previewId': '163828419', 'isPreview': False, 'externalURL': None}, name='log2normMedCenteredPhospho_cohort2.csv', modifiedOn='2025-10-07T16:10:11.331Z', dataFileHandleId='163828418', etag='755cc7b3-9fae-473e-b684-9e81d4200f95', concreteType='org.sagebionetworks.repo.model.FileEntity', versionNumber=3, isLatestVersion=True, cacheDir='', versionLabel='3', parentId='syn70078365', modifiedBy='1418096')
-

Now that we have two cohorts we can try to combine without batch -correction.

-
-
-

Combined phospho

-

Combining the phospho data here.

-
##now we move back to long form
-plong <- rbind(plong1,plong2)
-  #pmat |>
-#  as.data.frame()|>
-#  tibble::rownames_to_column('site')|>
-#  pivot_longer(-site,names_to='fname',values_to='abundance')|>
-#  left_join(rbind(pfnames1,pfnames2))|>
-#    group_by(site,fname,aliquot,cohort) |>
-#  summarize(meanAbundance=mean(abundance,na.rm=T)) |>
-#  left_join(meta)
-
-         
-compsites <- plong|>
-#  subset(meanAbundance>(-5))|>
-  group_by(site)|>
-  summarize(spec = n_distinct(Specimen))|>
-  subset(spec==31)
-
-#plong$meanAbundance[which(!is.finite(plong$meanAbundance))]<-0
-
-ppcs<-plong|>ungroup()|>
-  dplyr::select(Specimen,meanAbundance,site)|>
-  unique()|>
-  subset(site%in%compsites$site)|>
-  #subset(!is.na(site))|>
-  #subset(!is.na(meanAbundance))|>
-  tidyr::pivot_wider(names_from='Specimen',values_from='meanAbundance',
-                     values_fn=mean,values_fill=0)|>
-  tibble::column_to_rownames('site')|>
-  t()|>
-  prcomp()
-
-pplot<-ppcs$x|>
-  as.data.frame()|>
-  dplyr::select(PC1,PC2,PC3)|>
-  tibble::rownames_to_column('Specimen')|>
-  left_join(meta)|>
-  dplyr::select(PC2,PC1,Specimen,Patient,cohort)|>
-  mutate(cohort=as.factor(cohort))|>
-  distinct()|>
-  ggplot(aes(x=PC1,y=PC2,label=Specimen,col=Patient,shape=cohort))+
-    geom_point()+
-    #ggrepel::geom_label_repel()+
-    ggtitle("Phospho samples")+
-  ggplot2::scale_color_manual(values=pcols)
-
## Joining with `by = join_by(Specimen)`
-
ph<- plong |>ungroup()|>
-  subset(site%in%compsites$site)|>
-  ggplot(aes(x=meanAbundance,fill=as.factor(cohort)))+geom_histogram()
-
-cowplot::plot_grid(ph,pplot)
-
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
-

-
ggsave('cNFPhosphoQC.png',width=10)
-
## Saving 10 x 5 in image
-
pplot
-

-
ggsave('phosphoPCA.pdf')
-
## Saving 7 x 5 in image
-

Clearly there is a strong batch effect.

-
-
-
-

Normalize global proteomics

-

Now we can move on to the global data.

-
-

Cohort 1 global

-

Global proteomics in cohort 1 here.

-
####now process global
-#global1<-readr::read_tsv(syn$get('syn64906445')$path)
-global1 <- read.table(syn$get('syn69947355')$path,sep='\t',header=T,quote='"') |>
-  tidyr::separate_rows(Genes,sep=';')
-##log transform, median transform
-
-#global1[which(global1==0,arr.ind=TRUE)]<-NA
-
-gmat1<-apply(log2(global1[,5:ncol(global1)]),2,function(x) 0.6745 *(x-median(x,na.rm=T))/mad(x,na.rm=T))
-
-gmat1<-gmat1|>
-  as.data.frame()|>
-  mutate(Genes=global1$Genes)
-
-##extract aliquot info from file name
-gfnames1 <- data.frame(fname=colnames(global1)[5:ncol(global1)]) |>
-  mutate(aliquot=sapply(fname,function(x) unlist(strsplit(x,split='_'))[6])) |>
-  mutate(aliquot=as.double(aliquot))|>
-  mutate(cohort=1)
-
-glong1<-gmat1|>
-    tidyr::pivot_longer(1:(ncol(gmat1)-1),names_to='fname',values_to='abundance')|>
- left_join(gfnames1)|>
-   group_by(Genes,fname,aliquot,cohort)|>
-  summarize(meanAbundance=mean(abundance))|>
-  subset(is.finite(meanAbundance))|>
-  left_join(meta)
-
## Joining with `by = join_by(fname)`
-## `summarise()` has grouped output by 'Genes', 'fname', 'aliquot'. You can
-## override using the `.groups` argument.
-## Joining with `by = join_by(aliquot, cohort)`
-
readr::write_csv(glong1,file='log2normMedCenteredGlobal.csv')
-syn$store(File('log2normMedCenteredGlobal.csv',parentId='syn70078365'))
-
## File(files=['log2normMedCenteredGlobal.csv'], synapseStore=True, name='log2normMedCenteredGlobal.csv', _file_handle={'id': '163828754', 'etag': 'ecb596c9-4869-4413-9838-eb66cb83b76b', 'createdBy': '1418096', 'createdOn': '2025-10-02T02:09:54.000Z', 'modifiedOn': '2025-10-02T02:09:54.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': 'd6c01c9ff6bf97322703ae1d5bbc8349', 'fileName': 'log2normMedCenteredGlobal.csv', 'storageLocationId': 1, 'contentSize': 20723175, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/75b0981e-7e42-4c6d-b5aa-1443652f6c43/log2normMedCenteredGlobal.csv', 'previewId': '163828755', 'isPreview': False, 'externalURL': None}, etag='f8f7705a-9072-42f8-9616-31c447e8f787', createdBy='1418096', path='log2normMedCenteredGlobal.csv', createdOn='2025-03-20T19:29:54.101Z', modifiedOn='2025-10-07T16:10:14.595Z', concreteType='org.sagebionetworks.repo.model.FileEntity', versionLabel='43', isLatestVersion=True, cacheDir='', dataFileHandleId='163828754', parentId='syn70078365', id='syn65599827', versionNumber=43, modifiedBy='1418096')
-
-
-

Cohort 2 global

-

On October 7 we process the second cohort.

-
global2<-read.table(syn$get('syn69947352')$path,header=T,sep='\t',quote='"')|>
-  tidyr::separate_rows(Genes,sep=';')
-
-#global2[which(global2==0,arr.ind=TRUE)]<-NA
-
-gmat2<-apply(log2(global2[,5:ncol(global2)]),2,function(x) 
-  0.6745 *(x-median(x,na.rm=T))/mad(x,na.rm=T))
-rownames(gmat2)<-global2$Genes
-
-gmat2<-gmat2|>
-  as.data.frame()|>
-  mutate(Genes=global2$Genes)
-
-gfnames2 <- data.frame(fname=colnames(global2)[5:ncol(global2)]) |>
-  mutate(aliquot=sapply(fname,function(x) unlist(strsplit(x,split='_'))[7])) |>
-  mutate(aliquot=as.double(aliquot))|>
-  mutate(cohort=2)
-
-glong2<-gmat2|>
-  tidyr::pivot_longer(1:(ncol(gmat2)-1),names_to='fname',values_to='abundance')|>
-  left_join(gfnames2)|>
-  group_by(Genes,fname,aliquot,cohort)|>
-  summarize(meanAbundance=mean(abundance))|>
-  subset(is.finite(meanAbundance))|>
-  left_join(meta)
-
## Joining with `by = join_by(fname)`
-## `summarise()` has grouped output by 'Genes', 'fname', 'aliquot'. You can
-## override using the `.groups` argument.
-## Joining with `by = join_by(aliquot, cohort)`
-
#dupes<-global|>group_by(Genes)|>summarize(numIso=n())|>
-#  subset(numIso>1)
-
-
-readr::write_csv(glong2,file='log2normMedCenteredGlobal_cohort2.csv')
-syn$store(File('log2normMedCenteredGlobal_cohort2.csv',parentId='syn70078365'))
-
## File(synapseStore=True, files=['log2normMedCenteredGlobal_cohort2.csv'], createdBy='1418096', dataFileHandleId='163828776', createdOn='2025-10-07T15:56:09.318Z', parentId='syn70078365', modifiedOn='2025-10-07T16:10:16.010Z', name='log2normMedCenteredGlobal_cohort2.csv', etag='d11f86af-5af5-49f7-abf6-2bae20de1c9b', concreteType='org.sagebionetworks.repo.model.FileEntity', versionNumber=3, isLatestVersion=True, _file_handle={'id': '163828776', 'etag': 'ea261f85-be11-4c15-b745-fe79324c9b19', 'createdBy': '1418096', 'createdOn': '2025-10-02T02:12:01.000Z', 'modifiedOn': '2025-10-02T02:12:01.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': '4da78f673cfedc27c22cdb76a0663db8', 'fileName': 'log2normMedCenteredGlobal_cohort2.csv', 'storageLocationId': 1, 'contentSize': 13474091, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/6c66fd73-e5d0-4f05-acc1-2aca33440948/log2normMedCenteredGlobal_cohort2.csv', 'previewId': '163828779', 'isPreview': False, 'externalURL': None}, cacheDir='', versionLabel='3', id='syn70078414', modifiedBy='1418096', path='log2normMedCenteredGlobal_cohort2.csv')
-
-
-

Global combined without batch correction

-

Now we can combine the global data without batch correction.

-
#ma<-mean(glong$abundance,na.rm=T)
-#glong$meanAbundance[which(!is.finite(glong$meanAbundance))]<-0
-glong <- rbind(glong1,glong2)|>
-  subset(Genes!="")
-      
-compsites <- glong|>
-#  subset(meanAbundance>(-5))|>
-  group_by(Genes)|>
-  summarize(spec = n_distinct(Specimen))|>
-  subset(spec==31)
-
-gpcs<-glong|>ungroup()|>
-  dplyr::select(Specimen,meanAbundance,Genes)|>
-  subset(!is.na(Genes))|>
-  subset(Genes!="")|>
-  subset(Genes%in%compsites$Genes)|>
-  subset(!is.na(meanAbundance))|>
-  tidyr::pivot_wider(names_from='Specimen',values_from='meanAbundance',values_fn=mean,values_fill=0)|>
-  tibble::column_to_rownames('Genes')|>t()|>
-  prcomp()
-
-gplot<-gpcs$x|>
-  as.data.frame()|>
-  dplyr::select(PC1,PC2)|>
-  tibble::rownames_to_column('Specimen')|>
-  left_join(meta)|>
-    dplyr::select(PC1,PC2,Specimen,Patient,cohort)|>
-    mutate(cohort=as.factor(cohort))|>
-  distinct()|>
-  ggplot(aes(x=PC1,y=PC2,label=Specimen,col=Patient,shape=cohort))+
-  geom_point()+ggrepel::geom_label_repel()+ggtitle("Global samples")+
-  scale_color_manual(values=pcols)
-
## Joining with `by = join_by(Specimen)`
-
hplot <- ggplot(glong,aes(x=meanAbundance,fill=as.factor(cohort)))+geom_histogram()
-     
-
-cowplot::plot_grid(hplot,gplot)
-
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
-

-
ggsave('cNFGlobalQC.png',width=10)
-
## Saving 10 x 5 in image
-
gplot
-

-
ggsave('globalPCA.pdf')
-
## Saving 7 x 5 in image
-
-
-
-

Evaluate batch correction

-

Now we have two separate long tables with metadata, but we would like to combine them into a single one and batch correct. We can update this with each cohort.

-
##phospho 
-##TODO: ideally we should use the long tables and reconvert
-pmat <- merge(as.data.frame(pmat1),as.data.frame(pmat2))
-
-gmat <- merge(gmat1,gmat2)
-
-##remove duplicated sites
-dsites<- unique(pmat$site[which(duplicated(pmat$site))])
-mvals<-sapply(dsites,function(x) colSums(pmat[pmat$site==x,2:ncol(pmat)])) |>
-  t() |>
-  as.data.frame() |>
-  tibble::rownames_to_column('site')
-
-pmat <- pmat |>
-  subset(!site %in% dsites) |>
-  rbind(mvals)
-
-##now convert to matrix
-pmat <- pmat |>
-  tibble::remove_rownames() |>
-  tibble::column_to_rownames('site') |>
-  as.matrix()
-
-gmat <- gmat |>
-  subset(Genes!='')|>
-    tibble::remove_rownames() |>
-  tibble::column_to_rownames('Genes') |>
-    as.matrix()
-##sigh, batch correct?
-library(sva)
-
## Loading required package: mgcv
-
## Loading required package: nlme
-
## 
-## Attaching package: 'nlme'
-
## The following object is masked from 'package:dplyr':
-## 
-##     collapse
-
## This is mgcv 1.9-3. For overview type 'help("mgcv-package")'.
-
## Loading required package: genefilter
-
## Loading required package: BiocParallel
-
## Warning: package 'BiocParallel' was built under R version 4.4.3
-
  pmat[which(!is.finite(pmat),arr.ind=T)] <- 0.0
-  cbmat<-sva::ComBat(pmat,batch=meta$cohort,mean.only = FALSE)
-
## Found2batches
-
## Adjusting for0covariate(s) or covariate level(s)
-
## Standardizing Data across genes
-
## Fitting L/S model and finding priors
-
## Finding parametric adjustments
-
## Adjusting the Data
-
  gmat[which(!is.finite(gmat),arr.ind=T)] <- 0.0
-  cgmat <- sva::ComBat(gmat,batch=meta$cohort,mean.only = FALSE)
-
## Found2batches
-
## Adjusting for0covariate(s) or covariate level(s)
-
## Standardizing Data across genes
-
## Fitting L/S model and finding priors
-
## Finding parametric adjustments
-
## Adjusting the Data
-
 ppcs<-prcomp(t(cbmat))
- 
- pplot<-ppcs$x|>
-  as.data.frame()|>
-  dplyr::select(PC1,PC2)|>
-  tibble::rownames_to_column('fname')|>
-   left_join(rbind(pfnames1,pfnames2))|>
-  left_join(meta)|>
-    dplyr::select(PC1,PC2,Specimen,Patient,cohort)|>
-    mutate(cohort=as.factor(cohort))|>
-  distinct()|>
-  ggplot(aes(x=PC1,y=PC2,label=Specimen,col=Patient,shape=cohort))+
-  geom_point()+ggrepel::geom_label_repel()+ggtitle("Corrected phospho samples")+
-  scale_color_manual(values=pcols)
-
## Joining with `by = join_by(fname)`
-
## Joining with `by = join_by(aliquot, cohort)`
-
 pplot
-
## Warning: ggrepel: 2 unlabeled data points (too many overlaps). Consider
-## increasing max.overlaps
-

-
 gpcs<-prcomp(t(cgmat))
- gplot<-gpcs$x|>
-  as.data.frame()|>
-  dplyr::select(PC1,PC2)|>
-  tibble::rownames_to_column('fname')|>
-   left_join(rbind(gfnames1,gfnames2))|>
-  left_join(meta)|>
-    dplyr::select(PC1,PC2,Specimen,Patient,cohort)|>
-    mutate(cohort=as.factor(cohort))|>
-  distinct()|>
-  ggplot(aes(x=PC1,y=PC2,label=Specimen,col=Patient,shape=cohort))+
-  geom_point()+ggrepel::geom_label_repel()+ggtitle("Corrected global samples")+
-  scale_color_manual(values=pcols)
-
## Joining with `by = join_by(fname)`
-## Joining with `by = join_by(aliquot, cohort)`
-
 gplot
-
## Warning: ggrepel: 11 unlabeled data points (too many overlaps). Consider
-## increasing max.overlaps
-

-
-
-

Upload batch-corrected data to synapse

-

Now we can reformat the batch-corrected data and upload it to Synapse.

-
pc_long <- cbmat |>
-  as.data.frame() |>
-    tibble::rownames_to_column('site') |>
-    pivot_longer(-site,names_to = 'fname',values_to = 'correctedAbundance') |>
-  left_join(rbind(pfnames1,pfnames2)) |>
-  left_join(meta) |>
-  distinct()
-
## Joining with `by = join_by(fname)`
-## Joining with `by = join_by(aliquot, cohort)`
-
gc_long <- cgmat |>
-   as.data.frame() |>
-    tibble::rownames_to_column('Gene') |>
-    pivot_longer(-Gene,names_to = 'fname',values_to = 'correctedAbundance') |>
-  left_join(rbind(gfnames1,gfnames2)) |>
-  left_join(meta) |>
-  distinct()
-
## Joining with `by = join_by(fname)`
-## Joining with `by = join_by(aliquot, cohort)`
-
readr::write_csv(pc_long,file='batch12_correctedPhospho.csv')
-readr::write_csv(gc_long,file='batch12_correctedGlobal.csv')
-
-syn$store(File('batch12_correctedPhospho.csv',parentId='syn70078365'))
-
## File(modifiedOn='2025-10-07T16:10:24.600Z', synapseStore=True, etag='4ea27841-ef8e-4944-9a83-56d5ece5a31b', createdBy='1418096', path='batch12_correctedPhospho.csv', files=['batch12_correctedPhospho.csv'], id='syn70078415', parentId='syn70078365', concreteType='org.sagebionetworks.repo.model.FileEntity', versionNumber=3, isLatestVersion=True, cacheDir='', name='batch12_correctedPhospho.csv', versionLabel='3', createdOn='2025-10-07T15:56:24.984Z', modifiedBy='1418096', _file_handle={'id': '163984316', 'etag': '634ec130-4da8-4401-9731-511b589704b8', 'createdBy': '1418096', 'createdOn': '2025-10-07T15:56:25.000Z', 'modifiedOn': '2025-10-07T15:56:25.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': 'df202d20b8b8bcee6b0fbf1a7a712f37', 'fileName': 'batch12_correctedPhospho.csv', 'storageLocationId': 1, 'contentSize': 17060666, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/167bfcda-58b7-43d4-9457-4238fdae1118/batch12_correctedPhospho.csv', 'previewId': '163984318', 'isPreview': False, 'externalURL': None}, dataFileHandleId='163984316')
-
syn$store(File('batch12_correctedGlobal.csv',parentId='syn70078365'))
-
## File(synapseStore=True, createdBy='1418096', files=['batch12_correctedGlobal.csv'], name='batch12_correctedGlobal.csv', _file_handle={'id': '163984320', 'etag': 'f63e08df-542c-4642-a0f8-5314cda43a43', 'createdBy': '1418096', 'createdOn': '2025-10-07T15:56:29.000Z', 'modifiedOn': '2025-10-07T15:56:29.000Z', 'concreteType': 'org.sagebionetworks.repo.model.file.S3FileHandle', 'contentType': 'text/csv', 'contentMd5': '27e185221a058dafc833d1c2a52e74c1', 'fileName': 'batch12_correctedGlobal.csv', 'storageLocationId': 1, 'contentSize': 33970670, 'status': 'AVAILABLE', 'bucketName': 'proddata.sagebase.org', 'key': '1418096/c5da77ae-82a3-41a1-9045-d66fa87085a7/batch12_correctedGlobal.csv', 'previewId': '163984321', 'isPreview': False, 'externalURL': None}, id='syn70078416', createdOn='2025-10-07T15:56:29.301Z', concreteType='org.sagebionetworks.repo.model.FileEntity', cacheDir='', versionNumber=3, versionLabel='3', parentId='syn70078365', path='batch12_correctedGlobal.csv', modifiedBy='1418096', etag='935bbae3-5660-463b-b659-3d4a64de414b', isLatestVersion=True, modifiedOn='2025-10-07T16:10:25.266Z', dataFileHandleId='163984320')
-
- - - - -
- - - - - - - - - - - - - - - diff --git a/README.md b/README.md index 7c6c957..19f9fdf 100644 --- a/README.md +++ b/README.md @@ -5,4 +5,10 @@ This repository manages the code to analyze the results of drug screens in cNF o The data for this code is hosted on Synapse at http://synapse.org/cnfDrugResponse ## Current analysis -We are collecting omics measurements from cNF organoid samples together with drug response data to identify potential biomarkers of drug response. +We are collecting omics measurements from cNF organoid samples together with drug response data to identify potential biomarkers of drug response. + +The current analysis workflow (see `analysis/`) is organized as a small set of notebooks that: +- Join batches and (when needed) batch-correct RNA, global proteomics, and phosphoproteomics (via ComBat), and generate basic QC plots (e.g., PCA). +- Summarize drug response behavior across the cohort (most efficacious / most variable drugs + a cohort-wide drug heatmap). +- Correlate drug response with molecular features per modality (Spearman correlations + FDR), producing per-drug summaries of correlated features. +- Perform pathway enrichment on correlated features (direction-aware enrichment with leapR) to interpret putative mechanisms and biomarkers. diff --git a/analysis/01_run_normalize_omics.Rmd b/analysis/01_run_normalize_omics.Rmd new file mode 100644 index 0000000..39b30a3 --- /dev/null +++ b/analysis/01_run_normalize_omics.Rmd @@ -0,0 +1,222 @@ +--- +title: "Normalize Omics And Batch Correct" +author: "Jeremy Jacobson" +date: "`r format(Sys.Date(), '%B %d, %Y')`" +--- + +### Purpose + +This notebook is designed to process multiple batches from transcriptomics, proteomics, and phosphoproteomics data. + +In its simplest form, it will load the data in from synapse and join it together in a single matrix. Additional options include batch correction (needed for global proteomics and phosphoproteomics), batch correction plots (before and after), and uploading the data back to synapse. + +### Get Helper Scripts + +These scripts contain the functions that we will call below. +- 00_cNF_helper_code.R is a basic script that pulls data and sets colors for samples. +- 01_normalize_batchcorrect_omics.R contains all of the code for retrieving batches from synapse, + performing batch correction, plotting, and uploading. + + +```{r initiate, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +``` + +```{r setup, echo=TRUE, results='hide', message=FALSE, warning=FALSE} +library(synapser) +syn <- list(get = synapser::synGet, store = synapser::synStore) + +# Load helper metadata (00_cNF_helper_code.R defines 'meta' and 'pcols') +source("../source/00_cNF_helper_code.R") + +# Source the pipeline +source("../source/01_normalize_batchcorrect_omics.R") + +``` + +## Run Batch Correction/Normalization across RNA, Global and Phospho Data. + +#### Clean Data +Here are the IDs of several samples that were used for protocol optimization but not designed as experimental samples. +We want to remove these from the subsequent batch correction steps as they could skew the "real" results. +- Add to this list if more protocol optimization, contaminated samples or anything else we want to remove is present. 
+ +```{r} +# Substrings to drop (These were the protocol optimization samples) +drop_subs <- c( + "cNF_organoid_DIA_G_02_11Feb25", + "cNF_organoid_DIA_G_05_11Feb25", + "cNF_organoid_DIA_G_06_11Feb25", + "cNF_organoid_DIA_P_02_29Jan25", + "cNF_organoid_DIA_P_05_11Feb25", + "cNF_organoid_DIA_P_06_11Feb25" +) + +``` + +#### The Main Function + +We use `run_modality()` to perform all of the described steps. + +**Brief Summary**: + +- Input: We assign batches to the synapse files using the `batches` vector along with several other descriptive details / params. +- Output: We get out a combined matrix of data that is (optionally) batch corrected. + +**Reference guide**: + +##### Inputs + +- modality: "global", "phospho", or "rna" +- batches : list of batch configs, e.g. + + - list( + list(syn_id="syn...", cohort=1, value_start_col=5, fname_aliquot_index=8), + list(syn_id="syn...", cohort=2, value_start_col=5, fname_aliquot_index=9) + ) + + - Required per batch: + - syn_id : Synapse file ID for the wide feature×sample table + - cohort : cohort/batch label (used for joining + ComBat) + - fname_aliquot_index: token index (split on "_") used to parse aliquot from sample headers + + - Optional per batch: + - value_start_col: first sample column index (auto-detected if omitted) + +- meta: sample metadata joined by (aliquot, cohort); used to populate Specimen/Patient/Tumor +- syn: synapser client + +##### Options + +- drop_name_substrings: regex pattern(s) to drop unwanted sample columns +- out_dir / out_prefix / save_basename: control output file locations/names +- write_outputs: write CSV/PDF (+upload) vs return in-memory only +- upload_parent_id: Synapse folder/project ID for uploads +- pcols: optional named colors for PCA (Patient -> color) +- do_batch_correct: run ComBat (TRUE) or skip (FALSE) + +##### What it does + +- Normalizes each batch (per modality), combines batches on shared features, optionally runs ComBat by cohort, then exports long tables + PCA/hist plots. + +##### Returns + +- SummarizedExperiments: se_batches, se_combined, se_post (and se_corrected if ComBat ran) +- Long tables: long_pre, long_post +- PCA data + plots: pca_df_pre/post, plots + +### RNA Batches + +I am starting with the RNA batches as these are the simplest ones, not actually requiring batch correction (seen in the plot titled "rna samples" after the code runs). + +I'm going to describe the `batches` vector/argument that is required by the `run_modality` script here. + +- A different version of this is needed when more batches are present, omics changes, or the files are modified. +- Each file (even within omics type) may be formatted slightly differently so `value_start_col` and `fname_aliquot_index` may need to be set differently between batches. + +For the rna_batches vector, files are formatted identically so the `value_start_col` are both set to 3 and `fname_aliquot_index` is NULL. + +- **value_start_col**: First column where data begins. +- **fname_aliquot_index**: aliquot index (not used in RNA) +- **cohort**: this variable is used to set the batch number. + +This code is also written to handle different name formats in the headers such as NF0018.T1.organoid, NF0018.T1.tissue, NF0018.skin, NF0018.T2.organoids, NF0018.T2, NF0018.T3.organoids, NF0018.T3. ie: organoid vs organoids, or mixed up capitalization. 
+ + +```{r rna, message=FALSE, warning=FALSE, fig.height=3.5, fig.width=6, out.width="50%"} +rna_batches <- list( + list(syn_id = "syn66352931", cohort = 1, value_start_col = 3, fname_aliquot_index = NULL), + list(syn_id = "syn70765053", cohort = 2, value_start_col = 3, fname_aliquot_index = NULL) +) + +rna <- run_modality( + modality = "rna", + batches = rna_batches, + meta = meta, + syn = syn, + drop_name_substrings = drop_subs, + out_dir = "rna_test", + out_prefix = "rna", + upload_parent_id = "syn71099587", + pcols = pcols, + write_outputs = FALSE, # This has already been run so we don't need to write it again unless something changes. + save_basename = "RNA_12_no_batch_correct", + do_batch_correct = FALSE #Note this is set to false right now. Doesn't appear to be needed for RNA +) +``` +In the plots above, we see that no batch correction was required or performed. + +### Global Proteomic Batches + +Okay, now we are moving on to global proteomics. This is run in the same functional method as the RNA data. We set our batch info, assign modality and other basic info in `run_modality` and then we run it. Again, we don't upload the data (by default atleast). However, we do batch correct here - the reason why can be seen in the before and after plots. + +Here we do use the `fname_aliquot_index` variable in batches. This essentially splits by underscore and takes the Xth value + +Example for exp2 where fname_aliquot_index is 6: + +- `D:\DMS_WorkDir1\cNF_organoid_exp2_DIA_G_**01**_01May25_Romulus_BEHCoA-25-02-16.mzML`: 01 +- `D:\DMS_WorkDir1\cNF_organoid_exp2_DIA_G_**02**_01May25_Romulus_BEHCoA-25-02-16.mzML`: 02 + + +```{r global, message=FALSE, warning=FALSE, fig.height=3.5, fig.width=6, out.width="50%"} +global_batches <- list( + list(syn_id = "syn69947355", cohort = 1, value_start_col = 5, fname_aliquot_index = 5), + list(syn_id = "syn69947352", cohort = 2, value_start_col = 5, fname_aliquot_index = 6) +) + +global <- run_modality( + modality = "Global", + batches = global_batches, + meta = meta, + syn = syn, + drop_name_substrings = drop_subs, + out_dir = "global_test", + out_prefix = "global", + upload_parent_id = "syn70078365", + pcols = pcols, + write_outputs = FALSE, # This has already been run so we don't need to write it again unless something changes. + save_basename = "global_batch12_corrected", + do_batch_correct = TRUE +) + + +``` + +Above we can see that the batch correction was successfully performed for the global proteomic data. If anything changes or if new batches are added, we can set write_outputs to TRUE and rerun. + +### Phospho Proteomic Batches + +Finally, we run this same process for Phosphoproteomics. Again the run_modality function is used, although the batch info and params are updated. After this code is run, we will see that batch correction was required and that it ran successfully. + +Again we use the `fname_aliquot_index` variable in batches. This essentially splits by underscore and takes the Xth value. 
+ +Example for exp1 where fname_aliquot_index is 8: + +- `I:\UserData\LeDay\Piehowski_orgonoids_Feb25\RawData\1338217_cNF_organoid_DIA_P_**04**_29Jan25_Ned_BEHCoA-25-01-02.raw`: 04 +- `I:\UserData\LeDay\Piehowski_orgonoids_Feb25\RawData\1338241_cNF_organoid_DIA_P_**01**_29Jan25_Ned_BEHCoA-25-01-02.raw`: 01 + + + +```{r phospho, message=FALSE, warning=FALSE, fig.height=3.5, fig.width=6, out.width="50%"} +phospho_batches <- list( + list(syn_id = "syn69963552", cohort = 1, value_start_col = 7, fname_aliquot_index = 8), + list(syn_id = "syn69947351", cohort = 2, value_start_col = 7, fname_aliquot_index = 9) +) + +phospho <- run_modality( + modality = "Phospho", + batches = phospho_batches, + meta = meta, + syn = syn, + drop_name_substrings = drop_subs, + out_dir = "phospho_test", + out_prefix = "phospho", + upload_parent_id = "syn70078365", + pcols = pcols, + write_outputs = FALSE, # This has already been run so we don't need to write it again unless something changes. + save_basename = "phospho_batch12_corrected", + do_batch_correct = TRUE +) + +``` + diff --git a/analysis/01_run_normalize_omics.html b/analysis/01_run_normalize_omics.html new file mode 100644 index 0000000..95d1a79 --- /dev/null +++ b/analysis/01_run_normalize_omics.html @@ -0,0 +1,677 @@ + + + + + + + + + + + + + + + +Normalize Omics And Batch Correct + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+

Purpose

+

This notebook is designed to process multiple batches from +transcriptomics, proteomics, and phosphoproteomics data.

+

In its simplest form, it will load the data in from synapse and join +it together in a single matrix. Additional options include batch +correction (needed for global proteomics and phosphoproteomics), batch +correction plots (before and after), and uploading the data back to +synapse.

+
+
+

Get Helper Scripts

+

These scripts contain the functions that we will call below.
+- 00_cNF_helper_code.R is a basic script that pulls data and sets colors +for samples.
+- 01_normalize_batchcorrect_omics.R contains all of the code for +retrieving batches from synapse, performing batch correction, plotting, +and uploading.

+
library(synapser)
+syn <- list(get = synapser::synGet, store = synapser::synStore)
+
+# Load helper metadata (00_cNF_helper_code.R defines 'meta' and 'pcols')
+source("../source/00_cNF_helper_code.R")
+
+# Source the pipeline
+source("../source/01_normalize_batchcorrect_omics.R")
+
+
+

Run Batch Correction/Normalization across RNA, Global and Phospho +Data.

+
+

Clean Data

+

Here are the IDs of several samples that were used for protocol optimization and were not intended as experimental samples. We want to remove these from the subsequent batch-correction steps as they could skew the “real” results. Add to this list if more protocol-optimization samples, contaminated samples, or anything else we want to remove shows up.

+
# Substrings to drop (These were the protocol optimization samples)
+drop_subs <- c(
+  "cNF_organoid_DIA_G_02_11Feb25",
+  "cNF_organoid_DIA_G_05_11Feb25",
+  "cNF_organoid_DIA_G_06_11Feb25",
+  "cNF_organoid_DIA_P_02_29Jan25",
+  "cNF_organoid_DIA_P_05_11Feb25",
+  "cNF_organoid_DIA_P_06_11Feb25"
+)
+
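For context, a minimal sketch of how this substring-based dropping could work (illustrative only; the actual filtering lives inside run_modality() via drop_name_substrings, and mat stands in for a wide feature x sample table):

```r
# Illustrative: drop sample columns whose names match any of the listed substrings
# (treated as regex patterns, per the reference guide below).
keep <- !grepl(paste(drop_subs, collapse = "|"), colnames(mat))
mat  <- mat[, keep, drop = FALSE]
```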
+
+

The Main Function

+

We use run_modality() to perform all of the described +steps.

+

Brief Summary:

+
    +
  • Input: We assign batches to the synapse files using the +batches vector along with several other descriptive details +/ params.
    +
  • +
  • Output: We get out a combined matrix of data that is (optionally) +batch corrected.
  • +
+

Reference guide:

+
+
Inputs
+
    +
  • modality: “global”, “phospho”, or “rna”

  • +
  • batches : list of batch configs, e.g. 

    +
      +
    • list( list(syn_id=“syn…”, cohort=1, value_start_col=5, +fname_aliquot_index=8), list(syn_id=“syn…”, cohort=2, value_start_col=5, +fname_aliquot_index=9) )

    • +
    • Required per batch:

      +
        +
      • syn_id : Synapse file ID for the wide feature×sample table
        +
      • +
      • cohort : cohort/batch label (used for joining + ComBat)
        +
      • +
      • fname_aliquot_index: token index (split on “_“) used to parse +aliquot from sample headers
      • +
    • +
    • Optional per batch:

      +
        +
      • value_start_col: first sample column index (auto-detected if +omitted)
      • +
    • +
  • +
  • meta: sample metadata joined by (aliquot, cohort); used to +populate Specimen/Patient/Tumor
    +

  • +
  • syn: synapser client

  • +
+
+
+
Options
+
    +
  • drop_name_substrings: regex pattern(s) to drop unwanted sample +columns
    +
  • +
  • out_dir / out_prefix / save_basename: control output file +locations/names
    +
  • +
  • write_outputs: write CSV/PDF (+upload) vs return in-memory +only
    +
  • +
  • upload_parent_id: Synapse folder/project ID for uploads
    +
  • +
  • pcols: optional named colors for PCA (Patient -> color)
    +
  • +
  • do_batch_correct: run ComBat (TRUE) or skip (FALSE)
  • +
+
+
+
What it does
+
    +
  • Normalizes each batch (per modality), combines batches on shared +features, optionally runs ComBat by cohort, then exports long tables + +PCA/hist plots.
  • +
+
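For orientation, a minimal sketch of the ComBat-by-cohort step named above. It mirrors the sva::ComBat call from the earlier notebook rather than the helper's actual code; mat and cohort are stand-ins for the combined matrix and per-sample batch labels:

```r
# Sketch of the batch-correction step: 'mat' is a features x samples matrix,
# 'cohort' gives one batch label per sample column.
library(sva)
mat[!is.finite(mat)] <- 0   # ComBat cannot handle NA/Inf values
corrected <- sva::ComBat(dat = mat, batch = cohort, mean.only = FALSE)
```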
+
+
Returns
+
    +
  • SummarizedExperiments: se_batches, se_combined, se_post (and +se_corrected if ComBat ran)
    +
  • +
  • Long tables: long_pre, long_post
    +
  • +
  • PCA data + plots: pca_df_pre/post, plots
  • +
+
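A hypothetical usage example, assuming the returned object is a named list with the elements listed above (element names as documented; rna comes from the run below):

```r
# Hypothetical: inspecting the pieces returned by run_modality()
res <- rna                        # e.g. the RNA run defined below
head(res$long_post)               # combined long table after (optional) correction
res$plots                         # PCA / histogram plots
SummarizedExperiment::assay(res$se_post)[1:5, 1:5]
```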
+
+
+

RNA Batches

+

I am starting with the RNA batches as these are the simplest ones, +not actually requiring batch correction (seen in the plot titled “rna +samples” after the code runs).

+

I’m going to describe the batches vector/argument that +is required by the run_modality script here.

+
    +
  • A different version of this is needed when more batches are present, +omics changes, or the files are modified.
    +
  • +
  • Each file (even within omics type) may be formatted slightly +differently so value_start_col and +fname_aliquot_index may need to be set differently between +batches.
  • +
+

For the rna_batches vector, the files are formatted identically, so value_start_col is set to 3 for both and fname_aliquot_index is NULL.

+
    +
  • value_start_col: First column where data +begins.
    +
  • +
  • fname_aliquot_index: aliquot index (not used in +RNA)
    +
  • +
  • cohort: this variable is used to set the batch +number.
  • +
+

This code is also written to handle different name formats in the headers, such as NF0018.T1.organoid, NF0018.T1.tissue, NF0018.skin, NF0018.T2.organoids, NF0018.T2, NF0018.T3.organoids, NF0018.T3 (i.e., organoid vs. organoids, or inconsistent capitalization).

+
rna_batches <- list(
+  list(syn_id = "syn66352931", cohort = 1, value_start_col = 3, fname_aliquot_index = NULL),
+  list(syn_id = "syn70765053", cohort = 2, value_start_col = 3, fname_aliquot_index = NULL)
+)
+
+rna <- run_modality(
+  modality = "rna",
+  batches  = rna_batches,
+  meta     = meta,
+  syn      = syn,
+  drop_name_substrings = drop_subs,
+  out_dir          = "rna_test",
+  out_prefix       = "rna",
+  upload_parent_id = "syn71099587",
+  pcols            = pcols,
+  write_outputs    = FALSE,     # This has already been run so we don't need to write it again unless something changes.
+  save_basename    = "RNA_12_no_batch_correct",
+  do_batch_correct = FALSE      #Note this is set to false right now. Doesn't appear to be needed for RNA 
+)
+

+In the plots above, we see that no batch correction was required or +performed.

+
+
+

Global Proteomic Batches

+

Okay, now we are moving on to global proteomics. This is run with the same function as the RNA data: we set our batch info, assign the modality and other basic info in run_modality, and then run it. Again, we don’t upload the data (by default at least). However, we do batch correct here; the reason why can be seen in the before and after plots.

+

Here we do use the fname_aliquot_index variable in batches. This essentially splits the sample header on underscores and takes the Xth token (a minimal sketch follows the examples below).

+

Example for exp2 where fname_aliquot_index is 6:

+
    +
  • D:\DMS_WorkDir1\cNF_organoid_exp2_DIA_G_**01**_01May25_Romulus_BEHCoA-25-02-16.mzML: +01
    +
  • +
  • D:\DMS_WorkDir1\cNF_organoid_exp2_DIA_G_**02**_01May25_Romulus_BEHCoA-25-02-16.mzML: +02
  • +
+
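A minimal sketch of that token extraction, using a bare file name for clarity (the real headers include the full instrument path, which shifts the token position, hence the per-batch setting; get_aliquot is a hypothetical helper name):

```r
# Split the sample header on "_" and take the token at fname_aliquot_index
get_aliquot <- function(fname, idx) unlist(strsplit(fname, split = "_"))[idx]

get_aliquot("cNF_organoid_exp2_DIA_G_01_01May25_Romulus_BEHCoA-25-02-16.mzML", 6)
#> "01"
```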
global_batches <- list(
+  list(syn_id = "syn69947355", cohort = 1, value_start_col = 5, fname_aliquot_index = 5),
+  list(syn_id = "syn69947352", cohort = 2, value_start_col = 5, fname_aliquot_index = 6)
+)
+
+global <- run_modality(
+  modality = "Global",
+  batches  = global_batches,
+  meta     = meta,
+  syn      = syn,
+  drop_name_substrings = drop_subs,
+  out_dir          = "global_test",
+  out_prefix       = "global",
+  upload_parent_id = "syn70078365",
+  pcols            = pcols,
+  write_outputs    = FALSE,     # This has already been run so we don't need to write it again unless something changes.
+  save_basename    = "global_batch12_corrected",
+  do_batch_correct = TRUE
+)
+

+
## Found 2 genes with uniform expression within a single batch (all zeros); these will not be adjusted for batch.
+

+

Above we can see that the batch correction was successfully performed +for the global proteomic data. If anything changes or if new batches are +added, we can set write_outputs to TRUE and rerun.

+
+
+

Phospho Proteomic Batches

+

Finally, we run this same process for Phosphoproteomics. Again the +run_modality function is used, although the batch info and params are +updated. After this code is run, we will see that batch correction was +required and that it ran successfully.

+

Again we use the fname_aliquot_index variable in +batches. This essentially splits by underscore and takes the Xth +value.

+

Example for exp1 where fname_aliquot_index is 8:

+
    +
  • I:\UserData\LeDay\Piehowski_orgonoids_Feb25\RawData\1338217_cNF_organoid_DIA_P_**04**_29Jan25_Ned_BEHCoA-25-01-02.raw: +04
    +
  • +
  • I:\UserData\LeDay\Piehowski_orgonoids_Feb25\RawData\1338241_cNF_organoid_DIA_P_**01**_29Jan25_Ned_BEHCoA-25-01-02.raw: +01
  • +
+
phospho_batches <- list(
+  list(syn_id = "syn69963552", cohort = 1, value_start_col = 7, fname_aliquot_index = 8),
+  list(syn_id = "syn69947351", cohort = 2, value_start_col = 7, fname_aliquot_index = 9)
+)
+
+phospho <- run_modality(
+  modality = "Phospho",
+  batches  = phospho_batches,
+  meta     = meta,
+  syn      = syn,
+  drop_name_substrings = drop_subs,
+  out_dir          = "phospho_test",
+  out_prefix       = "phospho",
+  upload_parent_id = "syn70078365",
+  pcols            = pcols,
+  write_outputs    = FALSE, # This has already been run so we don't need to write it again unless something changes.
+  save_basename    = "phospho_batch12_corrected",
+  do_batch_correct = TRUE
+)
+

+
+
+ + + + +
+ + + + + + + + + + + + + + + diff --git a/analysis/02_analyze_modality.Rmd b/analysis/02_analyze_modality.Rmd new file mode 100644 index 0000000..1f5611b --- /dev/null +++ b/analysis/02_analyze_modality.Rmd @@ -0,0 +1,188 @@ +--- +title: "Analyze Modality Correlations" +author: "Jeremy Jacobson" +date: "`r format(Sys.Date(), '%B %d, %Y')`" +--- + +### Purpose + +This notebook is designed to take the drug viability data and the long-format omics tables (RNA, global, and phospho), then compute and plot correlations between drug response and molecular features. These long-format omics tables are batch corrected when necessary and have been returned from the Normalize Omics Notebook. + +The `analyze_modality` will load the inputs from Synapse, build the matrices, save the plots, and return the correlation results so we can quickly inspect them. This markdown file also uses a `pdf_to_png_if_possible` function to help display the plots directly in the knitted markdown file. + +### Get Helper Scripts + +These scripts contain the functions that we will call below. + +- 00_cNF_helper_code.R is a basic helper script (Synapse helpers, metadata, and shared utilities). +- 02_analyze_modality_correlations.R contains the code for building matrices, plotting summaries, and computing correlations. + +```{r initiate, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) + +#Set your directory to the cNFDrugScreening base directory if source is not working +# knitr::opts_knit$set(root.dir = "path_to_cNFDrugScreening") +``` + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) + +source("../source/00_cNF_helper_code.R") +source("../source/02_analyze_modality_correlations.R") +``` + +## Load in Data + +These are the four main inputs we need. Drug viability plus the three long-format omics tables. + +```{r load, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +# Drugs +drugs <- readr::read_tsv(synGet("syn69947322")$path) + +# RNA +rlong <- readr::read_csv(synGet("syn71333780")$path) + +# Global +glong <- readr::read_csv(synGet("syn70078416")$path) + +# Phospho +plong <- readr::read_csv(synGet("syn70078415")$path) +``` + +## Run correlations per modality + +Below, we run the same workflow three times, once per modality. The only thing that changes is which omics table we use and which column identifies the feature. + +The RNA step calls `analyze_modality()` (end-to-end) which: +- Builds a sample x drug matrix and a sample x feature matrix. +- Saves drug summary plots to the output directory (most_efficacious, most_variable, and the heatmap). +- Computes Spearman correlations between drug response and molecular features. +- Returns the results as a list so we can inspect them directly in R. + +For global and phospho, we call `analyze_modality_correlations()` only, which: +- Reuses the drug matrix from the RNA run. +- Builds the sample x feature matrix for that modality. +- Computes Spearman correlations between drug response and molecular features. +- Saves the correlation feature summary plot. + +A few plot notes (these get written to `outdir`): +- **most_efficacious.pdf**: drugs with the lowest mean viability across samples. This is basically a "top hits" plot. +- **most_variable.pdf**: drugs with the highest variability across samples. These are the ones that may be more sample-dependent. +- **drug_heatmap_large_viability.pdf**: a large sample-by-drug heatmap, but only for drugs measured in all samples. This is useful for clustering and spotting outliers. 
Samples/patients are separated by lines, within each are 1-3 tumors. +- **cor_features_by_drug.pdf**: counts of significant correlated features per drug (split by direction). This is a quick way to see which drugs have real signal in this modality. + +**Note**: `most_efficacious`, `most_variable`, and `drug_heatmap_large_viability` are based on the drug response table, not the omics table. +They get recreated on every end-to-end run, but they are the same plots. Because of that, we run the end-to-end workflow once (RNA), and then for global/phospho we only run the modality-only correlation step. + +**Note 2:** The end-to-end `analyze_modality` function may take a long time to run (~30-45 min). + +### RNA + +```{r rna, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +corr_rna <- analyze_modality( + fits = drugs, + df_long = rlong, + sample_col = "Specimen", + feature_col = "feature_id", + value_col = "correctedAbundance", + metric = "uM_viability", + heatmap_filename = "rna_drug_heatmap_large_viability.pdf", + outdir = "new_figs" +) + +# Copy the shared filenames to RNA-specific names so later modalities don't overwrite what we want to show here +dir.create("new_figs", showWarnings = FALSE, recursive = TRUE) +file.copy("new_figs/most_efficacious.pdf", "new_figs/rna_most_efficacious.pdf", overwrite = TRUE) +file.copy("new_figs/most_variable.pdf", "new_figs/rna_most_variable.pdf", overwrite = TRUE) +file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/rna_cor_features_by_drug.pdf", overwrite = TRUE) +``` + + +For RNA, the “features” are the rows we test in the correlation step, and they come directly from the `feature_id` column in the RNA long table (`rlong`). In most cases `feature_id` is a gene ID (or a transcript ID, depending on how the RNA file was generated). The workflow builds a sample × feature matrix from RNA, then correlates every RNA feature against drug viability for each drug. + +If a drug shows a large number of significant RNA correlations (after FDR filtering), that is a strong sign the RNA signal is tracking response for that drug. In other words, the transcriptome is separating the sensitive vs resistant samples for that drug in a consistent way. + +Below are the plots I show right after running RNA. This is the only place I show the drug-only plots, because those are based on the drug response table and are essentially the same no matter which omics table we are using. + +**Positive Correlation** indicates a **higher viability** which means that there is **lower effect** of the drug / **higher resistance** to the drug. + +**Negative Correlation** indicates a **lower viability** which means that there is **greater effect** of the drug / **higher sensitivity** to the drug. 
+ +Metric used: `uM_viability` + +Files shown for RNA: +- new_figs/rna_most_efficacious.pdf +- new_figs/rna_most_variable.pdf +- new_figs/rna_drug_heatmap_large_viability.pdf +- new_figs/rna_cor_features_by_drug.pdf + + +```{r rna_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="95%", cache=TRUE} + +show_plots(c( + "new_figs/rna_most_efficacious.pdf", + "new_figs/rna_most_variable.pdf", + "new_figs/rna_drug_heatmap_large_viability.pdf", + "new_figs/rna_cor_features_by_drug.pdf" +), dpi = 220) +``` + +### Global Proteomics + +```{r global, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +corr_global <- analyze_modality_correlations( + df_long = glong, + sample_col = "Specimen", + feature_col = "Gene", + value_col = "correctedAbundance", + drug_mat = corr_rna$drug_mat, + outdir = "new_figs", + fdr_thresh = 0.25 +) + +# Copy the shared filenames to global-specific names +file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/global_cor_features_by_drug.pdf", overwrite = TRUE) +``` + +For global proteomics, the features are proteins. In theory, these may be closer to the phenotype than RNA. + +Global correlation plot + +```{r global_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="95%", cache=TRUE} +show_plots(c( + "new_figs/global_cor_features_by_drug.pdf" +), dpi = 220) +``` + +### Phospho Proteomics + +```{r phospho, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +corr_phospho <- analyze_modality_correlations( + df_long = plong, + sample_col = "Specimen", + feature_col = "site", + value_col = "correctedAbundance", + drug_mat = corr_rna$drug_mat, + outdir = "new_figs", + fdr_thresh = 0.25 +) + +# Copy the shared filenames to phospho-specific names +file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/phospho_cor_features_by_drug.pdf", overwrite = TRUE) +``` + +For phospho, the features are phosphorylation sites. It is normal for this to be noisier and higher-dimensional, but when a drug shows a cluster of correlated sites, it can be really informative for the mechanism - however, there is just one correlation here with 2 features. + +Here is the phospho correlation plot - for some reason, this one doesn't always knit correctly, so I'm adding the table too. + +```{r phospho_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="95%", cache=TRUE} +show_plots(c( + "new_figs/phospho_cor_features_by_drug.pdf" +), dpi = 220) +``` + + + +```{r} +corr_phospho$cor_summary +``` diff --git a/analysis/02_analyze_modality.html b/analysis/02_analyze_modality.html new file mode 100644 index 0000000..a976974 --- /dev/null +++ b/analysis/02_analyze_modality.html @@ -0,0 +1,598 @@ + + + + + + + + + + + + + + + +Analyze Modality Correlations + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+

Purpose

+

This notebook is designed to take the drug viability data and the +long-format omics tables (RNA, global, and phospho), then compute and +plot correlations between drug response and molecular features. These +long-format omics tables are batch corrected when necessary and have +been returned from the Normalize Omics Notebook.

+

The analyze_modality function will load the inputs from Synapse, build the matrices, save the plots, and return the correlation results so we can quickly inspect them. This markdown file also uses a pdf_to_png_if_possible function to help display the plots directly in the knitted markdown file.

+
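A rough sketch of what such a display helper might look like (hypothetical, not the actual implementation in the sourced script), assuming pdftools and knitr are available:

```r
# Hypothetical sketch: rasterize the first page of each PDF to PNG,
# then embed the PNGs in the knit output.
pdf_to_png_if_possible <- function(pdf, dpi = 220) {
  png <- sub("\\.pdf$", "_1.png", pdf)
  pdftools::pdf_convert(pdf, format = "png", pages = 1, filenames = png, dpi = dpi)
  png
}

show_plots <- function(pdfs, dpi = 220) {
  knitr::include_graphics(vapply(pdfs, pdf_to_png_if_possible, character(1), dpi = dpi))
}
```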
+
+

Get Helper Scripts

+

These scripts contain the functions that we will call below.

+- 00_cNF_helper_code.R is a basic helper script (Synapse helpers, metadata, and shared utilities).
+- 02_analyze_modality_correlations.R contains the code for building matrices, plotting summaries, and computing correlations.
+
+

Load in Data

+

These are the four main inputs we need. Drug viability plus the three +long-format omics tables.

+
# Drugs
+drugs <- readr::read_tsv(synGet("syn69947322")$path)
+
+# RNA 
+rlong <- readr::read_csv(synGet("syn71333780")$path)
+
+# Global
+glong <- readr::read_csv(synGet("syn70078416")$path)
+
+# Phospho
+plong <- readr::read_csv(synGet("syn70078415")$path)
+
+
+

Run correlations per modality

+

Below, we run the same workflow three times, once per modality. The +only thing that changes is which omics table we use and which column +identifies the feature.

+

The RNA step calls analyze_modality() (end-to-end) +which:
+- Builds a sample x drug matrix and a sample x feature matrix.
+- Saves drug summary plots to the output directory (most_efficacious, +most_variable, and the heatmap).
+- Computes Spearman correlations between drug response and molecular +features.
+- Returns the results as a list so we can inspect them directly in +R.

+

For global and phospho, we call +analyze_modality_correlations() only, which:
+- Reuses the drug matrix from the RNA run.
+- Builds the sample x feature matrix for that modality.
+- Computes Spearman correlations between drug response and molecular +features.
+- Saves the correlation feature summary plot.

+
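As a rough illustration of the long-to-wide step above, assuming the corrected long tables from the previous notebook (the helper's actual code may differ; feat_mat is a hypothetical name):

```r
# Illustrative: build a sample x feature matrix from a corrected long table
library(dplyr)
library(tidyr)
library(tibble)

feat_mat <- glong |>
  select(Specimen, Gene, correctedAbundance) |>
  pivot_wider(names_from = Gene, values_from = correctedAbundance, values_fn = mean) |>
  column_to_rownames("Specimen") |>
  as.matrix()
```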

A few plot notes (these get written to outdir):
+- most_efficacious.pdf: drugs with the lowest mean +viability across samples. This is basically a “top hits” plot.
+- most_variable.pdf: drugs with the highest variability +across samples. These are the ones that may be more +sample-dependent.
+- drug_heatmap_large_viability.pdf: a large +sample-by-drug heatmap, but only for drugs measured in all samples. This +is useful for clustering and spotting outliers. Samples/patients are +separated by lines, within each are 1-3 tumors.
+- cor_features_by_drug.pdf: counts of significant +correlated features per drug (split by direction). This is a quick way +to see which drugs have real signal in this modality.

+

Note: most_efficacious, +most_variable, and +drug_heatmap_large_viability are based on the drug response +table, not the omics table.
+They get recreated on every end-to-end run, but they are the same plots. +Because of that, we run the end-to-end workflow once (RNA), and then for +global/phospho we only run the modality-only correlation step.

+

Note 2: The end-to-end analyze_modality +function may take a long time to run (~30-45 min).

+
+

RNA

+
corr_rna <- analyze_modality(
+  fits        = drugs,            
+  df_long     = rlong,           
+  sample_col  = "Specimen",
+  feature_col = "feature_id",
+  value_col   = "correctedAbundance",
+  metric      = "uM_viability",
+  heatmap_filename = "rna_drug_heatmap_large_viability.pdf",
+  outdir = "new_figs"
+)
+
+# Copy the shared filenames to RNA-specific names so later modalities don't overwrite what we want to show here
+dir.create("new_figs", showWarnings = FALSE, recursive = TRUE)
+file.copy("new_figs/most_efficacious.pdf", "new_figs/rna_most_efficacious.pdf", overwrite = TRUE)
+file.copy("new_figs/most_variable.pdf",   "new_figs/rna_most_variable.pdf",   overwrite = TRUE)
+file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/rna_cor_features_by_drug.pdf", overwrite = TRUE)
+

For RNA, the “features” are the rows we test in the correlation step, +and they come directly from the feature_id column in the +RNA long table (rlong). In most cases +feature_id is a gene ID (or a transcript ID, depending on +how the RNA file was generated). The workflow builds a sample × feature +matrix from RNA, then correlates every RNA feature against drug +viability for each drug.

+

If a drug shows a large number of significant RNA correlations (after +FDR filtering), that is a strong sign the RNA signal is tracking +response for that drug. In other words, the transcriptome is separating +the sensitive vs resistant samples for that drug in a consistent +way.

+
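For concreteness, a minimal sketch of the per-drug correlation test described above (illustrative only; the actual analyze_modality_correlations code may differ, and cor_one_drug is a hypothetical helper), assuming a viability vector and a sample x feature matrix aligned on the same samples:

```r
# Illustrative: Spearman correlation of each feature with one drug's viability,
# then Benjamini-Hochberg FDR adjustment and filtering.
cor_one_drug <- function(drug_vec, feat_mat, fdr_thresh = 0.25) {
  res <- apply(feat_mat, 2, function(f) {
    ct <- suppressWarnings(cor.test(drug_vec, f, method = "spearman"))
    c(rho = unname(ct$estimate), p = ct$p.value)
  })
  out <- as.data.frame(t(res))
  out$fdr <- p.adjust(out$p, method = "BH")
  out[out$fdr < fdr_thresh, , drop = FALSE]
}
```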

Below are the plots I show right after running RNA. This is the only +place I show the drug-only plots, because those are based on the drug +response table and are essentially the same no matter which omics table +we are using.

+

Positive Correlation indicates a higher +viability which means that there is lower +effect of the drug / higher resistance to the +drug.

+

Negative correlation indicates lower viability, which means a stronger effect of the drug / greater sensitivity to the drug.
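+

A trivial helper that encodes this convention (illustration only):

# Map the sign of a viability correlation onto the interpretation above.
+label_direction <- function(cor_value) {
+  ifelse(cor_value > 0,
+         "resistance-associated (higher viability)",
+         "sensitivity-associated (lower viability)")
+}
+label_direction(c(0.85, -0.85))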

+

Metric used: uM_viability

+

Files shown for RNA:
+- new_figs/rna_most_efficacious.pdf
+- new_figs/rna_most_variable.pdf
+- new_figs/rna_drug_heatmap_large_viability.pdf
+- new_figs/rna_cor_features_by_drug.pdf

+
## Converting page 1 to rna_most_efficacious_1.png... done!
+## Converting page 1 to rna_most_variable_1.png... done!
+## Converting page 1 to rna_drug_heatmap_large_viability_1.png... done!
+## Converting page 1 to rna_cor_features_by_drug_1.png... done!
+

+
+
+

Global Proteomics

+
corr_global <- analyze_modality_correlations(
+  df_long     = glong,           
+  sample_col  = "Specimen",
+  feature_col = "Gene",
+  value_col   = "correctedAbundance",
+  drug_mat    = corr_rna$drug_mat,
+  outdir      = "new_figs",
+  fdr_thresh  = 0.25
+)
+
+# Copy the shared filenames to global-specific names
+file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/global_cor_features_by_drug.pdf", overwrite = TRUE)
+

For global proteomics, the features are proteins. In principle, protein abundance may track the phenotype more closely than RNA does.

+

Global correlation plot

+
## Converting page 1 to global_cor_features_by_drug_1.png... done!
+

+
+
+

Phospho Proteomics

+
corr_phospho <- analyze_modality_correlations(
+  df_long     = plong,           
+  sample_col  = "Specimen",
+  feature_col = "site",
+  value_col   = "correctedAbundance",
+  drug_mat    = corr_rna$drug_mat,
+  outdir      = "new_figs",
+  fdr_thresh  = 0.25
+)
+
+# Copy the shared filenames to phospho-specific names
+file.copy("new_figs/cor_features_by_drug.pdf", "new_figs/phospho_cor_features_by_drug.pdf", overwrite = TRUE)
+

For phospho, the features are phosphorylation sites. This layer is typically noisier and higher-dimensional, but when a drug shows a cluster of correlated sites it can be informative about mechanism. Here, however, only one drug (NVP-BEZ235) has more than a single significantly correlated site.

+

Here is the phospho correlation plot. This one doesn't always knit correctly, so I'm including the table as well.
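+

If the PDF image fails to render during the knit, one simple fallback (illustration only, assuming knitr is available) is to print the summary table directly:

knitr::kable(corr_phospho$cor_summary, digits = 3)   # same table as shown below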

+
## Converting page 1 to phospho_cor_features_by_drug_1.png... done!
+

+
corr_phospho$cor_summary
+
## # A tibble: 5 × 4
+##   drug        direction features meanCor
+##   <chr>       <chr>        <int>   <dbl>
+## 1 Daporinad   pos              1   0.847
+## 2 ML 210      neg              1  -0.853
+## 3 NVP-BEZ235  pos              2   0.864
+## 4 Tofacitinib pos              1   0.868
+## 5 Tovorafenib pos              1   0.891
+
+
+ + + + +
+ + + + + + + + + + + + + + + diff --git a/analysis/03_pathway_enrichment.Rmd b/analysis/03_pathway_enrichment.Rmd new file mode 100644 index 0000000..fad1c1a --- /dev/null +++ b/analysis/03_pathway_enrichment.Rmd @@ -0,0 +1,224 @@ +--- +title: "Pathway Enrichment" +author: "Jeremy Jacobson" +date: "`r format(Sys.Date(), '%B %d, %Y')`" +--- + +### Purpose + +This script runs pathway enrichment using leapR, using the drug viability data plus the long-format omics tables (RNA, global, and phospho). +These long-format omics tables are batch corrected when necessary and have been returned from the Normalize Omics Notebook. + +The main idea is: +- Split samples into "sensitive" vs "resistant" for each drug (based on viability). +- Run enrichment on each side. +- Save the enrichment tables and plots so we can quickly scan which pathways show up for which drugs. + +This file is written to be simple and repeatable. It loads the inputs from Synapse, runs the same workflow per modality, and stores results +in a cache file so we do not re-run heavy work unless we need to. + +**Note**: This will take a couple of hours to run the first time. **Do not overwrite the cache** unless completely necessary. + +### Get Helper Scripts + +These scripts contain the functions that we will call below. + +- 00_cNF_helper_code.R is a basic helper script (Synapse helpers, metadata, and shared utilities). +- 03_leapr_biomarker.R contains the leapR wrapper functions, caching, and plotting helpers. + +```{r initiate, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) + +#Set your directory to the cNFDrugScreening base directory if source is not working +# knitr::opts_knit$set(root.dir = "path_to_cNFDrugScreening") +``` + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) + +source("../source/00_cNF_helper_code.R") +source("../source/03_leapr_biomarker.R") +``` + +## Load in Data + +These are the four main inputs we need - just like in analyze_modality. Drug viability plus the three long-format omics tables. + +```{r, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +# Drugs +drugs <- readr::read_tsv(synGet("syn69947322")$path) + +# RNA +rlong <- readr::read_csv(synGet("syn71333780")$path) + +# Global +glong <- readr::read_csv(synGet("syn70078416")$path) + +# Phospho +plong <- readr::read_csv(synGet("syn70078415")$path) +``` + +## Run Pathway Enrichment for Each Omics Modality + +Below, we run the same workflow three times, once per (omics) modality. The main things that change are: +- which omics table we use +- which column represents the feature +- which geneset library we test (krbpaths vs kinasesubstrates) + +Each call to `run_leapr_directional_one_cached()` does a few things: +- Splits samples into "sensitive" vs "resistant" for each drug (based on viability). +- Runs enrichment for each drug and direction. +- Writes CSV outputs (if `write_csvs = TRUE`). +- Saves an .Rdata cache so future runs are fast unless inputs or settings change. + +One note about plots: `run_leapr_directional_one_cached()` computes the enrichment results, but it does not automatically save the pathway barplots. +Those are generated by `save_leapr_plots()`. In this Rmd, I intentionally save plots for just **mirda** and **olaparib** so we do not generate hundreds of drug PDFs by accident. + +**So, If you want more plots, just add more drugs to the `drugs = c(...)` list for each modality.** + +### RNA (krbpaths). + +For RNA, the features come from the `feature_id` column in the RNA long table (`rlong`). 
In most cases, this is a gene or transcript ID. +Here we use the `krbpaths` gene set collection. + +```{r rna, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +res_bio_rna <- run_leapr_directional_one_cached( + drugs = drugs, + df_long = rlong, + sample_col = "Specimen", + feature_col = "feature_id", + value_col = "correctedAbundance", + omic_label = "rna", + cache_path = "../leapR_RNA_krbpaths_enrichment_direction_split.Rdata", + write_csvs = TRUE, + always_rerun = FALSE, + test_one = FALSE, + geneset_name = "krbpaths" +) + +# This is where pathway plot PDFs get created. +# I keep the list short on purpose so the report stays readable. +save_leapr_plots( + res_bio_rna, + omic_label = "rna", + drugs = c("Mirdametinib", "olaparib"), + top_n = 15 +) +``` + +Below are the RNA pathway plots created by `save_leapr_plots()`. These files get written to `figs/`. + +```{r rna_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="50%", cache=TRUE} +rna_plot_paths <- c( + "figs/pathways_Mirdametinib_rna_resistant_top15.pdf", + "figs/pathways_Mirdametinib_rna_sensitive_top15.pdf", + "figs/pathways_olaparib_rna_resistant_top15.pdf", + "figs/pathways_olaparib_rna_sensitive_top15.pdf" +) + +show_plots(rna_plot_paths, dpi = 240) +``` + +### Global Proteomics (krbpaths) + +For global proteomics, the features are proteins in the `Gene` column. We keep the same `krbpaths` gene set collection so RNA and global are easier to compare. + +```{r global, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +res_bio_global <- run_leapr_directional_one_cached( + drugs = drugs, + df_long = glong, + sample_col = "Specimen", + feature_col = "Gene", + value_col = "correctedAbundance", + omic_label = "global", + cache_path = "../leapR_Global_krbpaths_enrichment_direction_split.Rdata", + write_csvs = TRUE, + always_rerun = FALSE, + test_one = FALSE, + geneset_name = "krbpaths" +) + +save_leapr_plots( + res_bio_global, + omic_label = "global", + drugs = c("Mirdametinib", "olaparib"), + top_n = 15 +) +``` + +Here are the global pathway plots. Same idea as RNA, just using protein abundance instead of RNA abundance. + +```{r global_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="50%", cache=TRUE} +global_plot_paths <- c( + "figs/pathways_Mirdametinib_global_resistant_top15.pdf", + "figs/pathways_Mirdametinib_global_sensitive_top15.pdf", + "figs/pathways_olaparib_global_resistant_top15.pdf", + "figs/pathways_olaparib_global_sensitive_top15.pdf" +) + +show_plots(global_plot_paths, dpi = 240) +``` + +### Phospho Proteomics (kinasesubstrates) + +For phospho, the features are phosphorylation sites in the `site` column. Here we use a phospho-specific gene set library, `kinasesubstrates`, +because site-level maps more cleanly to kinase activity than to general pathway collections. + +```{r phospho, echo=TRUE, results='hide', message=FALSE, warning=FALSE, cache=TRUE} +res_bio_phospho <- run_leapr_directional_one_cached( + drugs = drugs, + df_long = plong, + sample_col = "Specimen", + feature_col = "site", + value_col = "correctedAbundance", + omic_label = "phospho", + cache_path = "../leapR_Phospho_kinasesubstrates_enrichment_direction_split.Rdata", + write_csvs = TRUE, + always_rerun = FALSE, + test_one = FALSE, + geneset_name = "kinasesubstrates" +) + +save_leapr_plots( + res_bio_phospho, + omic_label = "phospho", + drugs = c("Mirdametinib", "olaparib"), + top_n = 15 +) +``` + +Here are the phospho pathway plots. 
For mirdametinib and olaparib, no significant pathways are seen for the phospho data using drug viability. + +```{r phospho_plots, echo=FALSE, message=FALSE, warning=FALSE, fig.height=6.5, fig.width=10, out.width="50%", cache=TRUE} +phospho_plot_paths <- c( + "figs/pathways_Mirdametinib_phospho_resistant_top15.pdf", + "figs/pathways_Mirdametinib_phospho_sensitive_top15.pdf", + "figs/pathways_olaparib_phospho_resistant_top15.pdf", + "figs/pathways_olaparib_phospho_sensitive_top15.pdf" +) + +show_plots(phospho_plot_paths, dpi = 240) +``` + +Overall we can see that the number of drugs with significant pathways vary significantly by omics type. RNA demonstrates 231, Global proteomics has 167, while phosphoproteomics only shows 44 with just 1-4 effected pathways per drug. + +```{r sig_pathways_three_plots_paged, echo=TRUE, message=FALSE, warning=FALSE, fig.width=16, fig.height=6} +alpha <- 0.05 #uses SignedBH_pvalue +page_size <- 85 #number of drugs per plot. If too many, the names will overlap. +plot_sig_pathways_one_omic_paged(res_bio_rna, "rna", alpha = alpha, page_size = page_size) +plot_sig_pathways_one_omic_paged(res_bio_global, "global", alpha = alpha, page_size = page_size) +plot_sig_pathways_one_omic_paged(res_bio_phospho,"phospho",alpha = alpha, page_size = page_size) + +``` + + + + + +## Notes + +- If you want to force a full rerun (ignore cache), set `always_rerun = TRUE`. +- If you only want to test quickly on one drug, set `test_one = TRUE` (useful for debugging). +- If you want plots for more drugs, add them to the `drugs = c(...)` list in each `save_leapr_plots()` call. +- All pathway plot files are saved under `figs/` with names like: + - figs/pathways___{resistant|sensitive}_top.pdf. diff --git a/analysis/03_pathway_enrichment.html b/analysis/03_pathway_enrichment.html new file mode 100644 index 0000000..4dabc37 --- /dev/null +++ b/analysis/03_pathway_enrichment.html @@ -0,0 +1,616 @@ + + + + + + + + + + + + + + + +Pathway Enrichment + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + +
+

Purpose

+

This script runs pathway enrichment using leapR, using the drug +viability data plus the long-format omics tables (RNA, global, and +phospho).
+These long-format omics tables are batch corrected when necessary and +have been returned from the Normalize Omics Notebook.

+

The main idea is:
+- Split samples into “sensitive” vs “resistant” for each drug (based on +viability).
+- Run enrichment on each side.
+- Save the enrichment tables and plots so we can quickly scan which +pathways show up for which drugs.

+

This file is written to be simple and repeatable. It loads the inputs +from Synapse, runs the same workflow per modality, and stores results in +a cache file so we do not re-run heavy work unless we need to.

+

Note: This will take a couple of hours to run the +first time. Do not overwrite the cache unless +completely necessary.

+
+
+

Get Helper Scripts

+

These scripts contain the functions that we will call below.

+
  • 00_cNF_helper_code.R is a basic helper script (Synapse helpers, metadata, and shared utilities).
  • 03_leapr_biomarker.R contains the leapR wrapper functions, caching, and plotting helpers.
+
+
+

Load in Data

+

These are the four main inputs we need, just as in analyze_modality: drug viability plus the three long-format omics tables.

+
# Drugs
+drugs <- readr::read_tsv(synGet("syn69947322")$path)
+
+# RNA 
+rlong <- readr::read_csv(synGet("syn71333780")$path)
+
+# Global
+glong <- readr::read_csv(synGet("syn70078416")$path)
+
+# Phospho
+plong <- readr::read_csv(synGet("syn70078415")$path)
+
+
+

Run Pathway Enrichment for Each Omics Modality

+

Below, we run the same workflow three times, once per (omics) +modality. The main things that change are:
+- which omics table we use
+- which column represents the feature
+- which geneset library we test (krbpaths vs kinasesubstrates)
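+

A compact way to see those three settings side by side (values copied from the chunks below; the data frame itself is just an illustration):

modality_settings <- data.frame(
+  modality    = c("rna",        "global",   "phospho"),
+  feature_col = c("feature_id", "Gene",     "site"),
+  geneset     = c("krbpaths",   "krbpaths", "kinasesubstrates")
+)
+modality_settings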

+

Each call to run_leapr_directional_one_cached() does a +few things:
+- Splits samples into “sensitive” vs “resistant” for each drug (based on +viability).
+- Runs enrichment for each drug and direction.
+- Writes CSV outputs (if write_csvs = TRUE).
+- Saves an .Rdata cache so future runs are fast unless inputs or +settings change.
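+

The exact sensitive/resistant split rule lives inside run_leapr_directional_one_cached(). As a rough illustration only, a per-drug median split on uM_viability could look like the sketch below; the median cutoff is an assumption, not necessarily the implemented rule, and the column names follow the drug viability table used elsewhere in this repo:

# Illustration only: split samples into "sensitive" vs "resistant" for one drug
+# using an assumed median split on uM_viability.
+split_one_drug <- function(drug_tbl, drug_id) {
+  drug_tbl |>
+    dplyr::filter(improve_drug_id == drug_id, dose_response_metric == "uM_viability") |>
+    dplyr::group_by(improve_sample_id) |>
+    dplyr::summarize(viability = mean(dose_response_value, na.rm = TRUE), .groups = "drop") |>
+    dplyr::mutate(group = ifelse(viability <= median(viability, na.rm = TRUE),
+                                 "sensitive", "resistant"))
+}
+
+# head(split_one_drug(drugs, "Mirdametinib"))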

+

One note about plots: run_leapr_directional_one_cached() +computes the enrichment results, but it does not automatically save the +pathway barplots.
+Those are generated by save_leapr_plots(). In this Rmd, I intentionally save plots for just mirdametinib and olaparib so we do not generate hundreds of drug PDFs by accident.

+

So, if you want more plots, just add more drugs to the drugs = c(...) list for each modality.

+
+

RNA (krbpaths)

+

For RNA, the features come from the feature_id column in +the RNA long table (rlong). In most cases, this is a gene +or transcript ID. Here we use the krbpaths gene set +collection.

+
res_bio_rna <- run_leapr_directional_one_cached(
+  drugs        = drugs,
+  df_long      = rlong,
+  sample_col   = "Specimen",
+  feature_col  = "feature_id",
+  value_col    = "correctedAbundance",
+  omic_label   = "rna",
+  cache_path   = "../leapR_RNA_krbpaths_enrichment_direction_split.Rdata",
+  write_csvs   = TRUE,
+  always_rerun = FALSE,
+  test_one     = FALSE,
+  geneset_name = "krbpaths"
+)
+
+# This is where pathway plot PDFs get created.
+# I keep the list short on purpose so the report stays readable.
+save_leapr_plots(
+  res_bio_rna,
+  omic_label = "rna",
+  drugs = c("Mirdametinib", "olaparib"),
+  top_n = 15
+)
+

Below are the RNA pathway plots created by +save_leapr_plots(). These files get written to +figs/.

+
## Converting page 1 to pathways_Mirdametinib_rna_resistant_top15_1.png... done!
+## Converting page 1 to pathways_Mirdametinib_rna_sensitive_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_rna_resistant_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_rna_sensitive_top15_1.png... done!
+

+
+
+

Global Proteomics (krbpaths)

+

For global proteomics, the features are proteins in the +Gene column. We keep the same krbpaths gene +set collection so RNA and global are easier to compare.

+
res_bio_global <- run_leapr_directional_one_cached(
+  drugs        = drugs,
+  df_long      = glong,
+  sample_col   = "Specimen",
+  feature_col  = "Gene",
+  value_col    = "correctedAbundance",
+  omic_label   = "global",
+  cache_path   = "../leapR_Global_krbpaths_enrichment_direction_split.Rdata",
+  write_csvs   = TRUE,
+  always_rerun = FALSE,
+  test_one     = FALSE,
+  geneset_name = "krbpaths"
+)
+
+save_leapr_plots(
+  res_bio_global,
+  omic_label = "global",
+  drugs = c("Mirdametinib", "olaparib"),
+  top_n = 15
+)
+

Here are the global pathway plots. Same idea as RNA, just using +protein abundance instead of RNA abundance.

+
## Converting page 1 to pathways_Mirdametinib_global_resistant_top15_1.png... done!
+## Converting page 1 to pathways_Mirdametinib_global_sensitive_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_global_resistant_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_global_sensitive_top15_1.png... done!
+

+
+
+

Phospho Proteomics (kinasesubstrates)

+

For phospho, the features are phosphorylation sites in the site column. Here we use a phospho-specific gene set library, kinasesubstrates, because site-level data maps more cleanly to kinase activity than to general pathway collections.

+
res_bio_phospho <- run_leapr_directional_one_cached(
+  drugs        = drugs,
+  df_long      = plong,
+  sample_col   = "Specimen",
+  feature_col  = "site",
+  value_col    = "correctedAbundance",
+  omic_label   = "phospho",
+  cache_path   = "../leapR_Phospho_kinasesubstrates_enrichment_direction_split.Rdata",
+  write_csvs   = TRUE,
+  always_rerun = FALSE,
+  test_one     = FALSE,
+  geneset_name = "kinasesubstrates"
+)
+
+save_leapr_plots(
+  res_bio_phospho,
+  omic_label = "phospho",
+  drugs = c("Mirdametinib", "olaparib"),
+  top_n = 15
+)
+

Here are the phospho pathway plots. For mirdametinib and olaparib, no +significant pathways are seen for the phospho data using drug +viability.

+
## Converting page 1 to pathways_Mirdametinib_phospho_resistant_top15_1.png... done!
+## Converting page 1 to pathways_Mirdametinib_phospho_sensitive_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_phospho_resistant_top15_1.png... done!
+## Converting page 1 to pathways_olaparib_phospho_sensitive_top15_1.png... done!
+

+

Overall, the number of drugs with significant pathways varies substantially by omics type: RNA shows 231 drugs, global proteomics 167, and phosphoproteomics only 44, with just 1-4 affected pathways per drug.

+
alpha <- 0.05  #uses SignedBH_pvalue
+page_size <- 85 #number of drugs per plot. If too many, the names will overlap.
+plot_sig_pathways_one_omic_paged(res_bio_rna,    "rna",    alpha = alpha, page_size = page_size)
+

+
plot_sig_pathways_one_omic_paged(res_bio_global, "global", alpha = alpha, page_size = page_size)
+

+
plot_sig_pathways_one_omic_paged(res_bio_phospho,"phospho",alpha = alpha, page_size = page_size)
+

+
+
+
+

Notes

+
  • If you want to force a full rerun (ignore cache), set always_rerun = TRUE.
  • If you only want to test quickly on one drug, set test_one = TRUE (useful for debugging).
  • If you want plots for more drugs, add them to the drugs = c(...) list in each save_leapr_plots() call.
  • All pathway plot files are saved under figs/ with names like figs/pathways_{drug}_{omic}_{resistant|sensitive}_top{N}.pdf (e.g., figs/pathways_Mirdametinib_rna_resistant_top15.pdf).
+
+ + + + +
+ + + + + + + + + + + + + + + diff --git a/analysis/README.md b/analysis/README.md new file mode 100644 index 0000000..b268a43 --- /dev/null +++ b/analysis/README.md @@ -0,0 +1,40 @@ +# cNF multi-omics analysis pipeline overview +Purpose: cNF batch merging with optional batch correction, drug/omics correlation assessment, pathway enrichment analysis +The notebooks (`.Rmd`) are the primary analysis entry points; the `.R` scripts are sourced helpers. +The `.html` files result from the latest run/knit of the R Markdown files. + +## Quick run order (and what each file sources) + +1) **[01_run_normalize_omics.Rmd](01_run_normalize_omics.Rmd)** + - *Sources:* [../source/00_cNF_helper_code.R](../source/00_cNF_helper_code.R), [../source/01_normalize_batchcorrect_omics.R](../source/01_normalize_batchcorrect_omics.R) + - *Goal:* Per-modality preprocessing/normalization and batch correction (when needed). + - *Outputs (examples):* + - Batch-corrected long tables written to synapse. + - PCA/QC plots for before and after ComBat batch correction (e.g., `globalCorrectedPCA.pdf`, `phosphoCorrectedPCA.pdf`) + +2) **[02_analyze_modality.Rmd](02_analyze_modality.Rmd)** + - *Sources:* [../source/00_cNF_helper_code.R](../source/00_cNF_helper_code.R), [../source/02_analyze_modality_correlations.R](../source/02_analyze_modality_correlations.R) + - *Goal:* Builds the drug response matrix once, then runs per-modality correlations using the long-format omics tables. + - *Outputs (written to `outdir`, examples):* + - Drug-only plots (written once, based on drug response table): + - `most_efficacious.pdf` + - `most_variable.pdf` + - `drug_heatmap_large_viability.pdf` (heatmap of drugs measured in all samples) + - Modality correlation summary plots (written for each omics type): + - `_cor_features_by_drug.pdf` + - *Notes:* + - RNA typically runs the end-to-end wrapper (`analyze_modality()`), producing drug-only + modality outputs. + - Global/phospho typically run modality-only (`analyze_modality_correlations()`), reusing the RNA-derived `drug_mat`. + +3) **[03_pathway_enrichment.Rmd](03_pathway_enrichment.Rmd)** + - *Sources:* [../source/00_cNF_helper_code.R](../source/00_cNF_helper_code.R), [../source/03_leapr_biomarker.R](../source/03_leapr_biomarker.R) + - *Goal:* Direction-aware pathway enrichment (leapR) using correlated features (resistant & sensitive). + - *Outputs (examples):* + - Per-drug pathway barplots (e.g., `pathways____top15.pdf`) + - Summary plots across drugs (e.g., top recurrent pathways) + +## Helper scripts (sourced by notebooks) +- **[00_cNF_helper_code.R](../source/00_cNF_helper_code.R)**: Shared utilities (Synapse helpers, plotting helpers, common metadata). +- **[01_normalize_batchcorrect_omics.R](../source/01_normalize_batchcorrect_omics.R)**: Normalization / Joining Batches / ComBat batch correction code. +- **[02_analyze_modality_correlations.R](../source/02_analyze_modality_correlations.R)**: Drug summary plots, correlations (includes separated drug-only + modality-only functions). +- **[03_leapr_biomarker.R](../source/03_leapr_biomarker.R)**: Directional feature ranking + leapR enrichment + pathway plotting. 
diff --git a/cNF_Cohort_1_Process_and_Upload.py b/cNF_Cohort_Annotation.py similarity index 100% rename from cNF_Cohort_1_Process_and_Upload.py rename to cNF_Cohort_Annotation.py diff --git a/cNF_helper_code.R b/cNF_helper_code.R deleted file mode 100644 index 0670c6c..0000000 --- a/cNF_helper_code.R +++ /dev/null @@ -1,28 +0,0 @@ -##standard metadata across all cNFs, including colors if possible - - -library(synapser) -synLogin() -syn <- list(get = synapser::synGet, store = synapser::synStore) -library(readxl) - - -meta1 <- readxl::read_xlsx(syn$get('syn65595365')$path) |> - tidyr::separate(Specimen,into=c('Patient','Tumor'),sep='_',remove = FALSE)|> - dplyr::select(Specimen,Patient,Tumor,aliquot)|> - mutate(cohort=1) - -meta2 <- readxl::read_xlsx(syn$get('syn69920464')$path,sheet='Sheet2')|> - tidyr::separate(Specimen,into=c('Patient','Tumor'),sep='_',remove = FALSE)|> - dplyr::select(Specimen,Patient,Tumor,aliquot='SampleAlias')|> - mutate(cohort=2) - -meta <- rbind(meta1,meta2) %>% - filter(!(cohort == 1 & aliquot %in% c(2, 5, 6))) - - -pcols <- c(NF0017='steelblue',NF0021='orange2',NF0019='orchid4', - NF0022='goldenrod4',NF0018='olivedrab',NF0020='darkred', NF0022='tan', - NF0023='darkgrey',NF0025='lightblue',NF0026='yellow3',NF0027='magenta3', - NF0028='lightgreen',NF0031='pink2') - diff --git a/00_createCurveStats.py b/legacy_code/00_createCurveStats.py similarity index 100% rename from 00_createCurveStats.py rename to legacy_code/00_createCurveStats.py diff --git a/01_harmonize_drug_data.Rmd b/legacy_code/01_harmonize_drug_data.Rmd similarity index 97% rename from 01_harmonize_drug_data.Rmd rename to legacy_code/01_harmonize_drug_data.Rmd index 781f78a..179439b 100644 --- a/01_harmonize_drug_data.Rmd +++ b/legacy_code/01_harmonize_drug_data.Rmd @@ -18,7 +18,7 @@ library(tidyr) First we need to downlod the summary stats for each organoid . ```{r download, warning=FALSE} -source('cNF_helper_code.R') +source('00_cNF_helper_code.R') fits<-readr::read_tsv(synGet('syn69947322')$path) diff --git a/02_normalize_harmonize_proteomics.rmd b/legacy_code/02_normalize_harmonize_proteomics.rmd similarity index 100% rename from 02_normalize_harmonize_proteomics.rmd rename to legacy_code/02_normalize_harmonize_proteomics.rmd diff --git a/legacy_code/03_drug_biomarkers.Rmd b/legacy_code/03_drug_biomarkers.Rmd new file mode 100644 index 0000000..2fa4b21 --- /dev/null +++ b/legacy_code/03_drug_biomarkers.Rmd @@ -0,0 +1,1261 @@ +--- +title: "Evaluate drug and omics data for biomarker assessment" +author: "Sara gosline" +date: "2025-03-13" +output: html_document +--- + +This document is designed to be a working document where we can compare approaches to evaluate biomarkers of drug response across patient samples. We are collecting three types of data modalities: 1. RNA Sequencing 2. Global Proteomics 3. Phospho proteomics + +We also have drug sensitivity data (single dose viability, some curves) for many drugs. The question to ask is which molecules can predict drug response across patients? How robust/extendable is this? 
+ +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +library(synapser) +library(ggplot2) +library(dplyr) +library(tidyr) + +``` + +# Pull processed files from previous markdowns + +We have already run the previous scripts and stored data on Synapse + +```{r pull files} +Sys.setenv(SYNAPSE_AUTH_TOKEN="eyJ0eXAiOiJKV1QiLCJraWQiOiJXN05OOldMSlQ6SjVSSzpMN1RMOlQ3TDc6M1ZYNjpKRU9VOjY0NFI6VTNJWDo1S1oyOjdaQ0s6RlBUSCIsImFsZyI6IlJTMjU2In0.eyJhY2Nlc3MiOnsic2NvcGUiOlsidmlldyIsImRvd25sb2FkIiwibW9kaWZ5Il0sIm9pZGNfY2xhaW1zIjp7fX0sInRva2VuX3R5cGUiOiJQRVJTT05BTF9BQ0NFU1NfVE9LRU4iLCJpc3MiOiJodHRwczovL3JlcG8tcHJvZC5wcm9kLnNhZ2ViYXNlLm9yZy9hdXRoL3YxIiwiYXVkIjoiMCIsIm5iZiI6MTc1ODMyNTgwMiwiaWF0IjoxNzU4MzI1ODAyLCJqdGkiOiIyNjE1MSIsInN1YiI6IjM0NTM5NTUifQ.VcGcVRv0P50Cb6mm7B7hGxzcWdxG4TvMhq8lRZDNEgktWNxdhMA0zacJ1jeOEilfEI-9RRpA7jE2WIM3zjIgYTL-l-UobBMKnvL_gu6itQuf2DyKR6K9OBQJER4cy7N0o6_4qwq5YPflpF6uWuvgAfskuPmQH8Yz9Z80UjeLxFw2yKUJcvtanghAWwFerOEJxxb-PDxHHC6gM_VK-HGGprPQy9_Z33dCZYcmrDbCgV5rWUV5AdTyCHhDBrx4YMw43J2U7os88SPpEEmbvxVpfSFTusOsP1FzYn7ifnpw2t6Ip5ZwLmwShBViShMlTe9X0tgjO5htY5UdXyJBMVK0Qw") + +source("cNF_helper_code.R") +traceback() + +source('cNF_helper_code.R') +##read in drug code +fits <- readr::read_tsv(synGet('syn69947322')$path) + + +##read in proteomic data +glong <- readr::read_csv(synGet('syn70078416')$path) +plong <- readr::read_csv(synGet('syn70078415')$path) + + +##read in transcrniptomic data +#TODO: process transcritpomic data into long format + + +``` + +## Format protein data to collect correlation values + +Do simple correlations to identify putative trends in the data. + +Get most efficacious, variable, and heatmap + +```{r} +# ensure an output folder + +outdir <- "figs" +if (!dir.exists(outdir)) dir.create(outdir, recursive = TRUE) + +shared <- intersect(fits$improve_sample_id, glong$Specimen) +message(sprintf("Found %d shared samples from %d drug experiments and %d proteomic experiments", +length(shared), length(unique(fits$improve_sample_id)), length(unique(glong$Specimen)))) + +glob_dat <- glong |> +ungroup() |> +subset(Specimen %in% shared) |> +dplyr::select(Specimen, Gene, correctedAbundance) |> +tidyr::pivot_wider( +names_from = "Gene", +values_from = "correctedAbundance", +values_fill = 0, +values_fn = mean +) |> +tibble::column_to_rownames("Specimen") + +phos_dat <- plong |> +ungroup() |> +subset(Specimen %in% shared) |> +dplyr::select(Specimen, site, correctedAbundance) |> +tidyr::pivot_wider( +names_from = "site", +values_from = "correctedAbundance", +values_fill = 0, +values_fn = mean +) |> +tibble::column_to_rownames("Specimen") + +## drug data to matrix here + +drug_dat <- fits |> +subset(dose_response_metric == "uM_viability") |> +dplyr::select(improve_sample_id, improve_drug_id, dose_response_value) |> +tidyr::pivot_wider( +names_from = "improve_drug_id", +values_from = "dose_response_value", +values_fn = mean +) |> +tibble::column_to_rownames("improve_sample_id") + +## summarize drugs + +drug_counts <- fits |> +subset(dose_response_metric == "uM_viability") |> +group_by(improve_drug_id) |> +distinct() |> +summarize( +meanResponse = mean(dose_response_value, na.rm = TRUE), +nMeasured = n_distinct(improve_sample_id), +variability = sd(dose_response_value, na.rm = TRUE), +.groups = "drop" +) + +# -------- Plot 1: most efficacious -------- + +p1 <- drug_counts |> +arrange(desc(meanResponse)) |> +subset(meanResponse < 0.5) |> +ggplot(aes(y = meanResponse, x = improve_drug_id, colour = nMeasured, size = variability)) + +geom_point() + +theme_minimal() + +theme( 
+axis.text.x = element_text(angle = 45, hjust = 1) # tilt x labels +) + +labs(title = "Most efficacious drugs", +y = "Mean cell viability (fraction)", +x = "Drug") + +ggsave(file.path("figs/most_efficacious.pdf"), p1, width = 12, height = 8, dpi = 300) + +# -------- Plot 2: most variable -------- + +p2 <- drug_counts |> +arrange(desc(variability)) |> +subset(variability > 0.15) |> +as.data.frame() |> +ggplot(aes(y = meanResponse, x = improve_drug_id, colour = nMeasured, size = variability)) + +geom_point() + +theme_minimal() + +theme( +axis.text.x = element_text(angle = 45, hjust = 1) # tilt x labels +) + +labs(title = "Most variable drugs", +y = "Mean cell viability (fraction)", +x = "Drug") + +ggsave(file.path("figs/most_variable.pdf"), p2, width = 12, height = 8, dpi = 300) + +# -------- Plot 3: heatmap of complete-measurement drugs -------- + +fulldrugs <- drug_counts |> +subset(nMeasured == nrow(drug_dat)) + +# Save large, rotate column labels, and shrink font to avoid overlap + +# pheatmap can write directly to a file via the `filename` arg. +# 1. Sort samples alphanumerically +drug_dat_alpha <- drug_dat[order(rownames(drug_dat)), , drop = FALSE] + +# 2. Get patient prefix (e.g. "NF0017" from "NF0017_T1") +samples <- rownames(drug_dat_alpha) +prefixes <- sub("_.*$", "", samples) + +# 3. Indices where the prefix changes (group boundaries) +gap_idx <- which(prefixes[-1] != prefixes[-length(prefixes)]) + +# 4. Heatmap with thin black lines + group separators, saved to file +pheatmap::pheatmap( + as.matrix(drug_dat_alpha[, fulldrugs$improve_drug_id, drop = FALSE]), + filename = file.path("figs/drug_heatmap_grouped.pdf"), + width = 28, + height = 16, + angle_col = 45, + fontsize_col = 6, + cluster_rows = FALSE, # keep alphabetical order + cluster_cols = TRUE, + show_rownames = TRUE, + show_colnames = TRUE, + gaps_row = gap_idx, # group separators + border_color = "black" # thin black grid lines +) + + + +# ---- PRINT plots in the document as well ---- + +print(p1) +print(p2) + +pheatmap::pheatmap( +as.matrix(drug_dat_alpha[, fulldrugs$improve_drug_id, drop = FALSE]), +angle_col = 45, +fontsize_col = 6, +cluster_rows = FALSE, +cluster_cols = TRUE, +show_rownames = TRUE, +show_colnames = TRUE +) + +``` + + +# Basic correlation tests + +Can we simply find and rank proteins/psites/transcripts by correlation and do enrichment? 
+ +We can define a simple test to correlate features and drugs and assess significance and correct: + +```{r correlation tests, warning=FALSE, error=FALSE, message = FALSE} + +#this function computes correlations between all columns for each drug/feature matrix, rows are the sample identifiers +#also coputes significance +computeCors <- function(drug_dat,feat_dat,shared){ + + cres <- cor(drug_dat[shared,],feat_dat[shared,],use='pairwise.complete.obs',method='spearman') |> + as.data.frame() |> + tibble::rownames_to_column('drug')|> + tidyr::pivot_longer(cols=2:(1+ncol(feat_dat)),names_to='gene',values_to='cor') |> + arrange(desc(cor)) + + ##now lets try to get significance + csig <- do.call(rbind,lapply(colnames(drug_dat),function(x){ + do.call(rbind,lapply(colnames(feat_dat),function(y){ + pval <- 1.0 + try(pval <- cor.test(drug_dat[shared,x], + feat_dat[shared,y], + use = 'pairwise.complete.obs', + method = 'spearman')$p.value,silent = TRUE) + + return(c(corp = pval,drug = x,gene = y)) + })) |> + as.data.frame() |> + mutate(fdr=p.adjust(unlist(corp),method='fdr')) + })) |> + as.data.frame() |> + mutate(drug = unlist(drug)) |> + mutate(gene = unlist(gene)) + + fullcors <- cres|>left_join(data.frame(csig)) |> + mutate(direction=ifelse(cor<0,'neg','pos')) + + return(fullcors) +} + +``` + +Now that we have a function we can compute correlations of each data type. + +```{r compute feature cors, warning=FALSE, error=FALSE, message = FALSE} + +gcor <- computeCors(drug_dat[,fulldrugs$improve_drug_id],glob_dat,shared) |> + mutate(data='proteins') +pcor <- computeCors(drug_dat[,fulldrugs$improve_drug_id],phos_dat,shared) |> + mutate(data = 'phosphosites') + +allcor <- rbind(gcor,pcor) + +corsummary<-allcor |> subset(fdr<0.25) |> + group_by(drug,data,direction) |> + summarize(features=n(),meanCor=mean(cor)) + + +p_features <- corsummary |> + subset(features > 1) |> + ggplot(aes(x = drug,y = features,fill = direction)) + + facet_grid(data~.) + + geom_bar(position='dodge',stat='identity') + + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) + +ggsave(filename = file.path("figs/cor_features_by_drug.pdf"), + plot = p_features, width = 12, height = 6, units = "in") + +corsummary |> + arrange(desc(features)) |> + subset(features > 100) |> + dplyr::select(drug,data,direction,features,meanCor) + + + +``` + +Now we have the correlation values. what do we do with them? +## Correlation based enrichment +Do not run this everytime - it is extremely slow, so its setup to run once and save the data. The next steps load this data. 
+```{r functional enrichment} +# === Direction-aware leapR enrichment: run "top" (up) and "bottom" (down) separately === +# Requires: glob_dat (samples x proteins), phos_dat (samples x phosphosites), fits (drug responses) +# Outputs: +# prot_enrich[[drug]]$top / $bottom +# phos_enrich[[drug]]$top / $bottom +# Optional CSVs in folder "leapR_top_paths/dir_split/" + +library(dplyr) +library(tidyr) +library(stringr) +library(SummarizedExperiment) +library(leapR) + +# ---- choose drugs to run (use your two, or set to a larger list) ---- +# target_drugs <- c("THZ1", "Onalespib") +target_drugs <- unique(fits$improve_drug_id) + +# ---- genesets ---- +data(msigdb); geneset_db <- msigdb # or ncipid +data(kinasesubstrates) + +# ---- helpers ---- +extract_gene_from_site <- function(site_id) { + if (is.na(site_id) || site_id == "") return(NA_character_) + g <- str_split(as.character(site_id), "[:_\\-\\.]")[[1]][1] + toupper(stringr::str_extract(g, "^[A-Za-z0-9]+")) +} + +# Correlate response vector vs each feature column (Spearman) +col_spearman <- function(vec, mat) { + shared <- intersect(names(vec), rownames(mat)) + if (length(shared) < 3) return(setNames(rep(NA_real_, ncol(mat)), colnames(mat))) + v <- vec[shared] + m <- as.matrix(mat[shared, , drop = FALSE]) + apply(m, 2, function(col) { + if (all(is.na(col))) return(NA_real_) + if (sd(col, na.rm = TRUE) == 0 || sd(v, na.rm = TRUE) == 0) return(NA_real_) + suppressWarnings(cor(v, col, method = "spearman", use = "pairwise.complete.obs")) + }) +} + +# Build one-column SE; column name must match primary_columns +build_se_from_corvec <- function(cor_named_vec, features_all, col_label, map_to_gene = NULL, assay_label = "proteomics") { + v <- rep(NA_real_, length(features_all)); names(v) <- features_all + common <- intersect(names(cor_named_vec), features_all) + v[common] <- cor_named_vec[common] + mat <- matrix(v, nrow = length(v), ncol = 1, dimnames = list(features_all, col_label)) + rd <- DataFrame(feature_id = features_all) + rd$hgnc_id <- if (is.null(map_to_gene)) features_all else map_to_gene[features_all] + se <- SummarizedExperiment(assays = list(values = mat), rowData = rd, colData = DataFrame(sample = col_label)) + assayNames(se) <- assay_label + se +} + +safe_leapr <- function(...) tryCatch(leapR::leapR(...), error = function(e) { message("[leapR] ", conditionMessage(e)); NULL }) + +# ---- feature and mapping vectors ---- +prot_features <- colnames(glob_dat) +phos_features <- colnames(phos_dat) +phos_to_gene <- setNames(vapply(phos_features, extract_gene_from_site, FUN.VALUE = character(1)), + phos_features) + +# ---- results containers ---- +prot_enrich <- list() +phos_enrich <- list() + +# optional: write CSVs? 
+write_csvs <- TRUE +outdir <- "leapR_top_paths/dir_split" +if (write_csvs && !dir.exists(outdir)) dir.create(outdir, recursive = TRUE) + +# ---- per-drug workflow ---- +for (drug in target_drugs) { + message("=== ", drug, " ===") + # build response vector (mean per sample if repeats) + dv <- fits %>% + filter(improve_drug_id == !!drug, dose_response_metric == "uM_viability") %>% + group_by(improve_sample_id) %>% summarize(resp = mean(dose_response_value, na.rm = TRUE), .groups = "drop") + if (nrow(dv) == 0) { message(" no response rows; skipping"); next } + dv_vec <- setNames(dv$resp, dv$improve_sample_id) + + # correlations + prot_cor <- col_spearman(dv_vec, glob_dat) + phos_cor <- col_spearman(dv_vec, phos_dat) + + # split by sign + prot_pos <- prot_cor[!is.na(prot_cor) & prot_cor > 0] + prot_neg <- prot_cor[!is.na(prot_cor) & prot_cor < 0] + phos_pos <- phos_cor[!is.na(phos_cor) & phos_cor > 0] + phos_neg <- phos_cor[!is.na(phos_cor) & phos_cor < 0] + + # ---- global: TOP (positives as-is), BOTTOM (negatives flipped so they rank to the top) ---- + prot_enrich[[drug]] <- list(top = NULL, bottom = NULL) + + # TOP + if (length(prot_pos) >= 5) { + se_prot_top <- build_se_from_corvec(prot_pos, prot_features, col_label = paste0(drug, "_TOP"), assay_label = "proteomics") + prot_top <- safe_leapr(geneset = geneset_db, enrichment_method = "enrichment_in_order", + eset = se_prot_top, assay_name = "proteomics", + primary_columns = paste0(drug, "_TOP")) + prot_enrich[[drug]]$top <- prot_top + if (write_csvs && !is.null(prot_top)) { + write.csv(as.data.frame(prot_top), file = file.path(outdir, paste0(drug, "_global_TOP.csv"))) + } + } else message(" PROT top: too few positive features (", length(prot_pos), ")") + + # BOTTOM (flip sign so more negative = larger positive rank) + if (length(prot_neg) >= 5) { + se_prot_bot <- build_se_from_corvec(-prot_neg, prot_features, col_label = paste0(drug, "_BOTTOM"), assay_label = "proteomics") + prot_bot <- safe_leapr(geneset = geneset_db, enrichment_method = "enrichment_in_order", + eset = se_prot_bot, assay_name = "proteomics", + primary_columns = paste0(drug, "_BOTTOM")) + prot_enrich[[drug]]$bottom <- prot_bot + if (write_csvs && !is.null(prot_bot)) { + write.csv(as.data.frame(prot_bot), file = file.path(outdir, paste0(drug, "_global_BOTTOM.csv"))) + } + } else message(" PROT bottom: too few negative features (", length(prot_neg), ")") + + # ---- PHOSPHO: TOP/BOTTOM for pathways (gene mapping via hgnc_id) ---- + phos_enrich[[drug]] <- list(top = NULL, bottom = NULL) + + # TOP + if (length(phos_pos) >= 5) { + se_phos_top <- build_se_from_corvec(phos_pos, phos_features, col_label = paste0(drug, "_TOP"), + map_to_gene = phos_to_gene, assay_label = "phosphoproteomics") + phos_top <- safe_leapr(geneset = geneset_db, enrichment_method = "enrichment_in_order", + eset = se_phos_top, assay_name = "phosphoproteomics", + primary_columns = paste0(drug, "_TOP"), id_column = "hgnc_id") + phos_enrich[[drug]]$top <- phos_top + if (write_csvs && !is.null(phos_top)) { + write.csv(as.data.frame(phos_top), file = file.path(outdir, paste0(drug, "_phospho_TOP.csv"))) + } + } else message(" PHOS top: too few positive features (", length(phos_pos), ")") + + # BOTTOM (flip) + if (length(phos_neg) >= 5) { + se_phos_bot <- build_se_from_corvec(-phos_neg, phos_features, col_label = paste0(drug, "_BOTTOM"), + map_to_gene = phos_to_gene, assay_label = "phosphoproteomics") + phos_bot <- safe_leapr(geneset = geneset_db, enrichment_method = "enrichment_in_order", + eset = se_phos_bot, 
assay_name = "phosphoproteomics", + primary_columns = paste0(drug, "_BOTTOM"), id_column = "hgnc_id") + phos_enrich[[drug]]$bottom <- phos_bot + if (write_csvs && !is.null(phos_bot)) { + write.csv(as.data.frame(phos_bot), file = file.path(outdir, paste0(drug, "_phospho_BOTTOM.csv"))) + } + } else message(" PHOS bottom: too few negative features (", length(phos_neg), ")") + +} + +# Save all direction-split results for later reuse +save(prot_enrich, phos_enrich, file = "leapR_enrichment_direction_split.Rdata") + +message("Finished direction-aware enrichment. Results in lists prot_enrich / phos_enrich, and CSVs (if enabled).") + + +``` + + + +For each drug, how many terms do we see active? how many kinases? +```{r functional enrichment} +# ==== Load saved enrichment & build summaries (no list-casts, no count()) ==== +library(dplyr) +library(tidyr) +library(purrr) +library(tibble) +library(ggplot2) +library(forcats) +library(stringr) +library(scales) + +# Always load the precomputed enrichment lists here +load("leapR_enrichment_direction_split.Rdata") +if (!exists("prot_enrich")) stop("prot_enrich not found in leapR_enrichment_direction_split.Rdata") +if (!exists("phos_enrich")) stop("phos_enrich not found in leapR_enrichment_direction_split.Rdata") + +alpha <- 0.05 +topN <- 15 # <<< top 15 +dirs <- c("resistant","sensitive") + +# ---------- helpers ---------- +pick_pcol <- function(df) { + cols <- colnames(df) + if ("BH_pvalue" %in% cols) return(list(kind="adj", col="BH_pvalue")) + if ("SignedBH_pvalue" %in% cols) return(list(kind="adj", col="SignedBH_pvalue")) + if ("adj.P.Val" %in% cols) return(list(kind="adj", col="adj.P.Val")) + if ("padj" %in% cols) return(list(kind="adj", col="padj")) + if ("pvalue" %in% cols) return(list(kind="raw", col="pvalue")) + if ("P.Value" %in% cols) return(list(kind="raw", col="P.Value")) + NULL +} + +extract_term_col <- function(df) { + cands <- c("term","Term","pathway","Pathway","set","Set","geneset","gene_set","Category") + hit <- cands[cands %in% names(df)] + if (length(hit)) hit[[1]] else NULL +} + +tidy_one_result <- function(x) { + if (is.null(x)) return(tibble(pathway = character(), adj_p = numeric())) + df <- as.data.frame(x) + if (!nrow(df)) return(tibble(pathway = character(), adj_p = numeric())) + + term_col <- extract_term_col(df) + if (is.null(term_col)) { + df <- tibble::rownames_to_column(df, "pathway") + } else { + df <- dplyr::mutate(df, pathway = .data[[term_col]]) + } + df$pathway <- as.character(df$pathway) + + pk <- pick_pcol(df) + if (is.null(pk)) return(tibble(pathway = character(), adj_p = numeric())) + adj <- if (pk$kind == "adj") df[[pk$col]] else p.adjust(df[[pk$col]], method = "BH") + + tibble(pathway = df$pathway, adj_p = as.numeric(adj)) |> + filter(is.finite(adj_p), !is.na(adj_p)) +} + +flatten_by_direction <- function(lst, omic_label) { + if (!length(lst)) return(tibble()) + purrr::imap_dfr(lst, function(two, drug) { + bind_rows( + tidy_one_result(two$top) |> mutate(direction = "resistant"), + tidy_one_result(two$bottom) |> mutate(direction = "sensitive") + ) |> + mutate(drug = as.character(drug), omic = omic_label) + }) +} + +# ---------- long-format enrichment and significance filter ---------- +prot_long <- flatten_by_direction(prot_enrich, "global") +phos_long <- flatten_by_direction(phos_enrich, "phospho") +enrich_long <- bind_rows(prot_long, phos_long) |> as_tibble() +stopifnot(all(c("pathway","adj_p","direction","drug","omic") %in% names(enrich_long))) + +enrich_sig <- enrich_long |> + filter(is.finite(adj_p), 
!is.na(adj_p), adj_p < alpha) + +if (nrow(enrich_sig) == 0) { + message("No significant pathways at FDR < ", alpha, ".") + pathway_summary <- tibble() + drug_counts <- tibble() +} else { + pathway_summary <- enrich_sig |> + group_by(omic, direction, pathway) |> + summarise(n_drugs = n_distinct(drug), .groups = "drop") |> + arrange(desc(n_drugs)) + + all_drugs <- sort(unique(enrich_long$drug)) + drug_counts <- enrich_sig |> + group_by(drug, direction) |> + summarise(n_pathways = n_distinct(pathway), .groups = "drop") |> + complete(drug = all_drugs, direction = dirs, fill = list(n_pathways = 0L)) |> + arrange(drug, direction) + + # ---------- summary figures ---------- + dir.create("figs", showWarnings = FALSE) + + reorder_within <- function(x, by, within, sep = "___") { + x2 <- paste(x, within, sep = sep); stats::reorder(x2, by) + } + scale_y_reordered_wrap <- function(width = 32, sep = "___") { + ggplot2::scale_y_discrete( + labels = function(x) stringr::str_wrap(gsub(paste0(sep, ".*$"), "", x), width = width) + ) + } + + pathway_summary_top <- pathway_summary |> + group_by(omic, direction) |> + slice_max(order_by = n_drugs, n = topN, with_ties = FALSE) |> + ungroup() |> + mutate(pathway_in_omic = reorder_within(pathway, n_drugs, omic)) + + p_pathways <- ggplot(pathway_summary_top, + aes(y = pathway_in_omic, x = n_drugs, fill = direction)) + + geom_col(position = position_dodge(width = 0.85), width = 0.85) + + facet_wrap(~ omic, scales = "free_y") + + scale_y_reordered_wrap(width = 36) + + scale_x_continuous(expand = expansion(mult = c(0, 0.05))) + + labs(title = paste0("Top ", topN, " pathways enriched across drugs"), + y = "Pathway", x = "# Drugs") + + theme_minimal(base_size = 11) + + theme( + legend.position = "top", + strip.text = element_text(face = "bold"), + axis.text.y = element_text(size = 7), + panel.grid.major.x = element_blank(), + plot.margin = margin(5.5, 30, 5.5, 5.5) + ) + + coord_cartesian(clip = "off") + + # --- PDF saves (summary figs) --- + ggsave("figs/pathways_across_drugs_top15.pdf", p_pathways, + width = 12, height = 10, units = "in", device = cairo_pdf) + print(p_pathways) + + drug_counts_full <- drug_counts |> + group_by(drug) |> + mutate(total = sum(n_pathways)) |> + ungroup() |> + mutate(drug = forcats::fct_reorder(drug, total)) + + p_counts <- ggplot(drug_counts_full, aes(x = drug, y = n_pathways, fill = direction)) + + geom_col(position = position_dodge(width = 0.9), width = 0.85) + + labs(title = "Number of enriched pathways per drug", + x = "Drug", y = "# Pathways") + + theme_minimal(base_size = 11) + + theme( + legend.position = "top", + axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1, size = 6), + panel.grid.major.x = element_blank() + ) + + ggsave("figs/enriched_pathways_per_drug_wide.pdf", p_counts, + width = 18, height = 7, units = "in", device = cairo_pdf) + print(p_counts) +} + +# ---------- Top-2 most efficacious & most variable drugs ---------- +top2_efficacious <- character(0) +top2_variable <- character(0) + +if (exists("fits")) { + eff_tbl <- fits |> + filter(dose_response_metric == "uM_viability") |> + group_by(improve_drug_id) |> + summarise( + meanResponse = mean(dose_response_value, na.rm = TRUE), + variability = sd(dose_response_value, na.rm = TRUE), + nMeasured = dplyr::n_distinct(improve_sample_id), + .groups = "drop" + ) + top2_efficacious <- eff_tbl |> + arrange(meanResponse, desc(nMeasured)) |> + slice_head(n = 2) |> + pull(improve_drug_id) + top2_variable <- eff_tbl |> + arrange(desc(variability), desc(nMeasured)) |> + 
slice_head(n = 2) |> + pull(improve_drug_id) +} else { + some <- unique(enrich_long$drug) + top2_efficacious <- head(some, 2) + top2_variable <- head(rev(some), 2) +} + +# ---- Force-include Onalespib in top_interest (case-insensitive) ---- +ona_matches <- unique(enrich_long$drug[grepl("^onalespib$", enrich_long$drug, ignore.case = TRUE)]) +if (length(ona_matches) == 0) ona_matches <- "Onalespib" + +top_interest <- unique(c(top2_efficacious, top2_variable, ona_matches)) +message("Top-2 most efficacious (lowest mean viability): ", paste(top2_efficacious, collapse = ", ")) +message("Top-2 most variable (highest SD): ", paste(top2_variable, collapse = ", ")) +message("Force-included: ", paste(ona_matches, collapse = ", ")) + +# ---------- per-drug pathway barplots: always top 15, star-annotate significance ---------- +pick_pcol_plot <- function(df) { + cols <- colnames(df) + if ("BH_pvalue" %in% cols) return("BH_pvalue") + if ("SignedBH_pvalue" %in% cols) return("SignedBH_pvalue") + if ("adj.P.Val" %in% cols) return("adj.P.Val") + if ("padj" %in% cols) return("padj") + if ("pvalue" %in% cols) return("pvalue") + if ("P.Value" %in% cols) return("P.Value") + NA_character_ +} + +sig_stars <- function(p) dplyr::case_when( + is.na(p) ~ "", + p < 0.001 ~ "***", + p < 0.01 ~ "**", + p < 0.05 ~ "*", + TRUE ~ "" +) + +prep_plot_df <- function(res_df, n = 15) { + if (is.null(res_df)) return(NULL) + df <- as.data.frame(res_df); if (!nrow(df)) return(NULL) + if (!("feature" %in% names(df))) df <- tibble::rownames_to_column(df, "feature") + col <- pick_pcol_plot(df); if (is.na(col)) return(NULL) + + # Always compute BH adj p; then take top 15 by smallest adj_p (no significance filter) + adj_p <- if (col %in% c("pvalue","P.Value")) p.adjust(df[[col]], method = "BH") else df[[col]] + + df |> + mutate( + adj_p = as.numeric(adj_p), + score = -log10(pmax(adj_p, 1e-300)), + stars = sig_stars(adj_p), + signif = !is.na(adj_p) & adj_p < 0.05, + feature = stringr::str_wrap(as.character(feature), width = 40) + ) |> + filter(is.finite(adj_p)) |> + arrange(adj_p, desc(score)) |> + slice_head(n = n) +} + +plot_bar <- function(df_plot, title_text, label_type) { + if (is.null(df_plot) || !nrow(df_plot)) return(NULL) + lbl <- ifelse(label_type == "top", "Resistant pathways", "Sensitive pathways") + fillc <- ifelse(label_type == "top", "#D7191C", "#2C7BB6") + star_pad <- 0.15 + + ggplot(df_plot, aes(x = reorder(feature, score), y = score)) + + geom_col(aes(alpha = signif), fill = fillc, width = 0.85) + + scale_alpha_manual(values = c(`FALSE` = 0.5, `TRUE` = 1), guide = "none") + + geom_text(aes(y = score + star_pad, label = stars), + size = 3, hjust = 0) + + coord_flip(clip = "off") + + scale_y_continuous(expand = expansion(mult = c(0, 0.15))) + + labs(title = paste0(title_text, ", ", lbl), + x = NULL, y = expression(-log[10]("FDR"))) + + theme_minimal(base_size = 9) + + theme( + plot.title = element_text(size = 11), + axis.text.y = element_text(size = 7), + axis.text.x = element_text(size = 8), + plot.margin = margin(5.5, 30, 5.5, 5.5) + ) +} + +safelabel <- function(x) gsub("[^A-Za-z0-9_.-]", "_", x) + +dir.create("figs", showWarnings = FALSE) +for (drug in top_interest) { + pt <- if (drug %in% names(prot_enrich)) prot_enrich[[drug]]$top else NULL + pb <- if (drug %in% names(prot_enrich)) prot_enrich[[drug]]$bottom else NULL + ft <- if (drug %in% names(phos_enrich)) phos_enrich[[drug]]$top else NULL + fb <- if (drug %in% names(phos_enrich)) phos_enrich[[drug]]$bottom else NULL + + p1 <- plot_bar(prep_plot_df(pt, n = 15), 
paste0(drug, " - global"), "top") + p2 <- plot_bar(prep_plot_df(pb, n = 15), paste0(drug, " - global"), "bottom") + p3 <- plot_bar(prep_plot_df(ft, n = 15), paste0(drug, " - Phospho"), "top") + p4 <- plot_bar(prep_plot_df(fb, n = 15), paste0(drug, " - Phospho"), "bottom") + + if (!is.null(p1)) { + ggsave(file.path("figs", paste0("pathways_", safelabel(drug), "_global_resistant_top15.pdf")), + p1, width = 7, height = 5, device = cairo_pdf); print(p1) + } + if (!is.null(p2)) { + ggsave(file.path("figs", paste0("pathways_", safelabel(drug), "_global_sensitive_top15.pdf")), + p2, width = 7, height = 5, device = cairo_pdf); print(p2) + } + if (!is.null(p3)) { + ggsave(file.path("figs", paste0("pathways_", safelabel(drug), "_phospho_resistant_top15.pdf")), + p3, width = 7, height = 5, device = cairo_pdf); print(p3) + } + if (!is.null(p4)) { + ggsave(file.path("figs", paste0("pathways_", safelabel(drug), "_phospho_sensitive_top15.pdf")), + p4, width = 7, height = 5, device = cairo_pdf); print(p4) + } +} + + + +``` + + + + +# Print siginficant results for all drugs if we want. +```{r} +library(dplyr) +library(ggplot2) +library(tibble) +library(rlang) + +alpha <- 0.05 # significance threshold +top_n_to_show <- 15 + +# Prefer adjusted p if available; fall back to raw and adjust per-run +pick_pcol <- function(df) { + cols <- colnames(df) + if ("BH_pvalue" %in% cols) return(list(kind="adj", col="BH_pvalue")) + if ("SignedBH_pvalue" %in% cols) return(list(kind="adj", col="SignedBH_pvalue")) + if ("pvalue" %in% cols) return(list(kind="raw", col="pvalue")) + return(NULL) +} + +prep_plot_df <- function(res_df, n = top_n_to_show) { + if (is.null(res_df)) return(NULL) + df <- as.data.frame(res_df) + if (!nrow(df)) return(NULL) + + pick <- pick_pcol(df) + if (is.null(pick)) return(NULL) + + # unify to adj p + if (pick$kind == "adj") { + df <- df %>% mutate(adj_p = !!sym(pick$col)) + } else { # raw p → adjust within this run + df <- df %>% mutate(adj_p = p.adjust(!!sym(pick$col), method = "BH")) + } + + df %>% + rownames_to_column("feature") %>% + arrange(adj_p) %>% + # keep only significant ones; if none, return empty (caller will message) + filter(is.finite(adj_p), !is.na(adj_p), adj_p < alpha) %>% + head(n) %>% + mutate(score = -log10(pmax(adj_p, 1e-300))) +} + +plot_bar <- function(df_plot, title_text, label_type) { + if (is.null(df_plot) || !nrow(df_plot)) { + message(" No significant pathways for ", title_text, " (FDR<", alpha, ").") + return(NULL) + } + # Correct labels & colors for uM_viability convention: + # TOP -> resistant (red), BOTTOM -> sensitive (blue) + lbl <- ifelse(label_type == "top", "Resistant pathways", "Sensitive pathways") + fillc <- ifelse(label_type == "top", "#D7191C", "#2C7BB6") + + p <- ggplot(df_plot, aes(x = reorder(feature, score), y = score)) + + geom_col(fill = fillc) + + coord_flip() + + labs(title = paste0(title_text, ", ", lbl), + x = NULL, y = expression(-log[10]("FDR"))) + + theme_minimal(base_size = 9) + + theme( + plot.title = element_text(size = 11), + axis.text.y = element_text(size = 6), + axis.text.x = element_text(size = 8) + ) + print(p); invisible(p) +} + +plot_drug_panels <- function(drug) { + message("\n=== ", drug, " ===") + + # global + pt <- if (drug %in% names(prot_enrich)) prot_enrich[[drug]]$top else NULL + pb <- if (drug %in% names(prot_enrich)) prot_enrich[[drug]]$bottom else NULL + plot_bar(prep_plot_df(pt), paste0(drug, ", Global Proteomics"), "top") + plot_bar(prep_plot_df(pb), paste0(drug, ", Global Proteomics"), "bottom") + + # Phospho pathways + 
ft <- if (drug %in% names(phos_enrich)) phos_enrich[[drug]]$top else NULL + fb <- if (drug %in% names(phos_enrich)) phos_enrich[[drug]]$bottom else NULL + plot_bar(prep_plot_df(ft), paste0(drug, ", Phosphoproteomics"), "top") + plot_bar(prep_plot_df(fb), paste0(drug, ", Phosphoproteomics"), "bottom") +} + + +# --- run for your drugs --- +for (d in target_drugs) plot_drug_panels(d) + +``` + + + + +# Basic drug list +```{r} +drug_counts <- fits %>% + filter(dose_response_metric == "uM_viability") %>% + group_by(improve_drug_id) %>% + summarise( + n_rows = dplyr::n(), # total rows/measurements + n_specimens = n_distinct(improve_sample_id), # unique samples tested + meanResponse = mean(dose_response_value, na.rm = TRUE), + sdResponse = sd(dose_response_value, na.rm = TRUE), + .groups = "drop" + ) %>% + arrange(desc(n_specimens), improve_drug_id) + +# Plain list of drugs + total count +drug_list <- sort(unique(drug_counts$improve_drug_id)) +n_drugs <- length(drug_list) + +message(sprintf("Total unique drugs: %d", n_drugs)) +print((drug_list)) + +``` + + + + + + + + + + + + + + + + + + + + + +## Visualization +How should we visualize? Here is some older code +```{r plot cors, eval=FALSE} + +plotCors <- function(features,druglist,dataType='proteins'){ + ##subset a list of features and drugs and plot those in a graph + require(ggplot2) + if(dataType=='proteins'){ + ptab<-glong|>dplyr::rename(feature='Gene') + }else{ + ptab<-plong|>dplyr::rename(feature='site') + } + dtab<-fits|> + subset(dose_response_metric=='uM_viability')|> + dplyr::rename(Specimen='improve_sample_id',Drug='improve_drug_id')|> + subset(Drug%in%druglist) + + + ftab<-features|>left_join(ptab)|>left_join(dtab)|> + subset(!is.na(Drug)) + + feats <- unique(features$feature) + plots <- lapply(feats,function(x){ + corval <- ftab[ftab$feature==x,'cor'] + #corval <- ftab[ftab$feature==x,'pCor'] + + ftab|>subset(feature==x)|> + ggplot(aes(x=correctedAbundance,y=dose_response_value, + col=Patient,size=1))+ + geom_point()+ + facet_grid(~Drug)+ + ggtitle(paste(x,'Drug correlation'))+ + scale_color_manual(values=pcols) + }) + cowplot::plot_grid(plotlist= plots,ncol=2) + +} + +druglist<-c('Onalespib') +features<-subset(allcor,drug%in%druglist)|> + subset(fdr<0.25)|> + subset(abs(cor)>0.7)|> + subset(data=='proteins')|> + arrange(desc(abs(cor))) + +plotCors(rename(features[1:10,],feature='gene'),druglist) + +ggsave('onalespibFDR0.25Cors.pdf',height=20) + +``` + +# Random forest predictor + +Here we try to use random forest to extract predictive features. First we need to assess if the model can accurately predict drug response from the data. From those predictive models, we can extract features/biomarkers. + +First we build the data frames needed - I've included cohort as a covariate but may remove it. + +```{r random forest} +## separate out cohorts for prediction +cohorts <- meta |> + select(Specimen,cohort) |> + distinct() |> + tibble::column_to_rownames('Specimen') + +##for each drug, build model of cohort + protein ~ drug response +#removed cohort for now +gdf <- as.data.frame(glob_dat)#|>mutate(cohort=cohorts[rownames(glob_dat),'cohort']) +gnas <- which(apply(gdf,2,function(x) any(is.na(x)))) + +pdf <- as.data.frame(phos_dat)#|> mutate(cohort=cohorts[rownames(phos_dat),'cohort']) +pnas <- which(apply(pdf,2,function(x) any(is.na(x)))) + +mdf <- meta[-c(2,5),]|> ##have duplication here + tibble::column_to_rownames('Specimen') + + +``` + +Now we can loop through every drug, build model, and assess accuracy. 
+ +```{r evaluate predictivty} + +#trying ou tthis function tos ee how it goes +rfFeatures <- function(drug_dat,fdf, mdf){ + complete_drugs <- which(apply(drug_dat,2, + function(x) length(which(!is.na(x)))==length(x))) + print(paste("Evaluating random forest for ",length(complete_drugs),'drugs')) + all_preds <- do.call(rbind,lapply(names(complete_drugs),function(drug){ + + dg <- fdf#[,-gnas] + + ##create the metadata df with the drug of interest + dmdf <- mdf |> + mutate(drug=drug_dat[rownames(mdf),drug]) + + rf <- randomForest::randomForest(x=dg, + y=drug_dat[rownames(dg),drug], + importance=TRUE,ntree=500) + + im <- randomForest::importance(rf)|> + as.data.frame() |> + mutate(drug=drug) + pord <- intersect(rownames(mdf)[order(drug_dat[rownames(mdf),drug])],rownames(glob_dat)) + + #pheatmap::pheatmap(t(glob_dat[pord, + # rownames(im)]),annotation_col=dmdf, + # cellheight=10,cluster_cols = TRUE) + return(im) + ##what do we return?x + })) + return(all_preds) +} + + +``` + +Now what do we do with the importance features? + +```{r rf processing} + +##get importance for global +gimp <- rfFeatures(drug_dat=drug_dat,fdf=gdf,mdf=mdf) + +##get importance for phospho +pimp <- rfFeatures(drug_dat=drug_dat,fdf=pdf,mdf=mdf) + +``` + + + + + + + + + + + + + + + + + +# Old correlation code, dont run + +now we can visualize correlations + +```{r check out HSP90s, eval=FALSE} + +hsps<-unique(glong$Genes[grep('^HSP',glong$Genes)]) + +cor_hsps<-subset(allcor,gene%in%hsps)|>subset(drug=='Onalespib')|>subset(corp<0.2) +#print(paste('measured',length(hsps),'HSPs in global data of which', nrow(cor_hsps),' are correlated with Onalespib')) + +plotCors(rename(cor_hsps,feature='gene'),c('Onalespib')) + +ggsave('hspCorsOna.pdf',height=nrow(cor_hsps)*3) + +hspp<-unique(plong$site[grep('^HSP',plong$site)]) +cor_hspps<-subset(allcor,gene%in%hspp)|>subset(drug=='Onalespib')|>subset(corp<0.2) +#print(paste('measured',length(hspp),'HSPs in phospho data of which',nrow(cor_hspps),' are correlated with Onalespib')) + +plotCors(rename(cor_hspps,feature='gene'),c('Onalespib'),'phospho') + +ggsave('hspPhosphoCorsOna.pdf',height=nrow(cor_hspps)*3) + + +``` + +### Now lets look only at IC50 values + +There are a few drugs for which we have IC50 values + +```{r check ic50 cors,warning=FALSE,error=FALSE, eval=FALSE} + +ifits<-subset(fits,dose_response_metric=='fit_ic50') + +shared<-intersect(ifits$improve_sample_id,glong$Specimen) +print(paste('Found',length(shared),'shared samples')) + +## a full join might be a challenge, maybe just take two matrices +drug_dat <- ifits|> + dplyr::select(improve_sample_id,improve_drug_id,dose_response_value)|> + tidyr::pivot_wider(names_from='improve_drug_id',values_from='dose_response_value',values_fn=mean)|> + tibble::column_to_rownames('improve_sample_id') + +gres<-cor(drug_dat[shared,],glob_dat[shared,],use='pairwise.complete.obs',method='pearson')|> + as.data.frame()|> + tibble::rownames_to_column('drug')|> + tidyr::pivot_longer(cols=2:(1+ncol(glob_dat)),names_to='gene',values_to='pCor')|> + arrange(desc(pCor)) + +##now lets try to get significance + +gsig<-do.call(rbind,lapply(colnames(drug_dat),function(x){ + do.call(rbind,lapply(colnames(glob_dat),function(y){ + pval<-1.0 + try(pval<-cor.test(drug_dat[shared,x],glob_dat[shared,y],use='pairwise.complete.obs',method='pearson')$p.value,silent=TRUE) + return(c(corp=pval,drug=x,gene=y)) + } + ))|>as.data.frame()|> + mutate(fdr=p.adjust(unlist(corp),method='fdr')) + }))|> + as.data.frame()|> + mutate(drug=unlist(drug))|> + 
mutate(gene=unlist(gene)) + +fullcors<-gres|>left_join(data.frame(gsig))|>mutate(data='global') + +pres<-cor(drug_dat[shared,],phos_dat[shared,],use='pairwise.complete.obs',method='pearson')|> + as.data.frame()|> + tibble::rownames_to_column('drug')|> + tidyr::pivot_longer(cols=2:(1+ncol(phos_dat)),names_to='site',values_to='pCor')|> + arrange(desc(pCor)) + +##now lets look at correlations + +psig<-do.call(rbind,lapply(colnames(drug_dat),function(x){ + do.call(rbind,lapply(colnames(phos_dat),function(y){ + pval<-1.0 + try(pval<-cor.test(drug_dat[shared,x],phos_dat[shared,y],use='pairwise.complete.obs',method='pearson')$p.value,silent=TRUE) + return(c(corp=pval,drug=x,gene=y)) + } + ))|>as.data.frame()|> + mutate(fdr=p.adjust(unlist(corp),method='fdr')) + }))|>as.data.frame() + +fullpcors<-pres|>rename(gene='site')|>left_join(data.frame(psig))|>mutate(data='phospho') + +#combine all correlations +allcor<-rbind(fullcors,fullpcors)|> + mutate(direction=ifelse(pCor<0,'neg','pos')) + + +##lets count the correlations and plot + +corsummary<-allcor|>subset(fdr<0.1)|> + group_by(drug,data,direction)|> + summarize(features=n(),meanCor=mean(pCor)) + +corsummary|> + #subset(features>1)|> + ggplot(aes(x=data,y=features,fill=drug))+ + facet_grid(~direction)+ + geom_bar(position='dodge',stat='identity') + + + +``` + +Again we have onalespib with numerous significantly correlated proteins, and one phosphosite for digoxin showing up . + +```{r plot individual sites, eval=FALSE} + +druglist<-c('Onalespib') +features<-subset(allcor,drug%in%druglist)|> + subset(fdr<0.05)|> + subset(data=='global') + +plotCors(rename(features,feature='gene'),druglist) + + +druglist<-c('Digoxin') +features<-subset(allcor,drug%in%druglist)|> + subset(fdr<0.1)|> + subset(data=='phospho') + +#plotCors(rename(features,feature='gene'),druglist,data='phospho') + + + +``` + +Now we can check the HSP proteins directly + +```{r HSP correlation, eval=FALSE} +hsps<-unique(glong$Genes[grep('^HSP',glong$Genes)]) + +cor_hsps<-subset(allcor,gene%in%hsps)|>subset(drug=='Onalespib')|>subset(corp<0.05) + +print(paste('measured',length(hsps),'HSPs in global data of which', nrow(cor_hsps),' are correlated with Onalespib')) + +plotCors(rename(cor_hsps,feature='gene'),'Onalespib') + +hspp<-unique(plong$site[grep('^HSP',plong$site)]) +cor_hspps<-subset(allcor,gene%in%hspp)|>subset(drug=='Onalespib')|>subset(corp<0.1) + +print(paste('measured',length(hspp),'HSPs in phospho data of which',nrow(cor_hspps),' are correlated with Onalespib')) +``` + +The IC50 result is similar to the viability. 
Now we can check AUC + +```{r auc hsp check, error=FALSE, warning=FALSE, eval=FALSE} + +ifits<-subset(fits,dose_response_metric=='auc') + +shared<-intersect(ifits$improve_sample_id,glong$Specimen) +print(paste('Found',length(shared),'shared samples')) + +## a full join might be a challenge, maybe just take two matrices +drug_dat <- ifits|> + dplyr::select(improve_sample_id,improve_drug_id,dose_response_value)|> + tidyr::pivot_wider(names_from='improve_drug_id',values_from='dose_response_value',values_fn=mean)|> + tibble::column_to_rownames('improve_sample_id') + +gres<-cor(drug_dat[shared,],glob_dat[shared,],use='pairwise.complete.obs',method='pearson')|> + as.data.frame()|> + tibble::rownames_to_column('drug')|> + tidyr::pivot_longer(cols=2:(1+ncol(glob_dat)),names_to='gene',values_to='pCor')|> + arrange(desc(pCor)) + +##now lets try to get significance + +gsig<-do.call(rbind,lapply(colnames(drug_dat),function(x){ + do.call(rbind,lapply(colnames(glob_dat),function(y){ + pval<-1.0 + try(pval<-cor.test(drug_dat[shared,x],glob_dat[shared,y],use='pairwise.complete.obs',method='pearson')$p.value,silent=TRUE) + return(c(corp=pval,drug=x,gene=y)) + } + ))|>as.data.frame()|> + mutate(bh_p=p.adjust(unlist(corp),method='BH')) + }))|> + as.data.frame()|> + mutate(drug=unlist(drug))|> + mutate(gene=unlist(gene)) + +fullcors<-gres|>left_join(data.frame(gsig))|>mutate(data='global') + +pres<-cor(drug_dat[shared,],phos_dat[shared,],use='pairwise.complete.obs',method='pearson')|> + as.data.frame()|> + tibble::rownames_to_column('drug')|> + tidyr::pivot_longer(cols=2:(1+ncol(phos_dat)),names_to='site',values_to='pCor')|> + arrange(desc(pCor)) + +##now lets look at correlations + +psig<-do.call(rbind,lapply(colnames(drug_dat),function(x){ + do.call(rbind,lapply(colnames(phos_dat),function(y){ + pval<-1.0 + try(pval<-cor.test(drug_dat[shared,x],phos_dat[shared,y],use='pairwise.complete.obs',method='pearson')$p.value,silent=TRUE) + return(c(corp=pval,drug=x,gene=y)) + } + ))|>as.data.frame()|> + mutate(bh_p=p.adjust(unlist(corp),method='BH')) + }))|>as.data.frame() + +fullpcors<-pres|>rename(gene='site')|>left_join(data.frame(psig))|>mutate(data='phospho') + +#combine all correlations +allcor<-rbind(fullcors,fullpcors)|> + mutate(direction=ifelse(pCor<0,'neg','pos')) + + +##lets count the correlations and plot + +corsummary<-allcor|>subset(bh_p<0.1)|> + group_by(drug,data,direction)|> + summarize(features=n(),meanCor=mean(pCor)) + +corsummary|> + #subset(features>1)|> + ggplot(aes(x=data,y=features,fill=drug))+facet_grid(~direction)+geom_bar(position='dodge',stat='identity') + +print(corsummary) + +``` + +```{r HSP correlation again, eval=FALSE} +hsps<-unique(glong$Genes[grep('^HSP',glong$Genes)]) + +cor_hsps<-subset(allcor,gene%in%hsps)|>s +ubset(drug=='Onalespib')|>subset(corp<0.05) +print(paste('measured',length(hsps),'HSPs in global data of which', nrow(cor_hsps),' are correlated with Onalespib')) +cor_hsps + +plotCors(rename(cor_hsps, feature='gene'),'Onalespib') + +hspp<-unique(plong$site[grep('^HSP',plong$site)]) +cor_hspps<-subset(allcor,gene%in%hspp)|>subset(drug=='Onalespib')|>subset(corp<0.1) +print(paste('measured',length(hspp),'HSPs in phospho data of which',nrow(cor_hspps),' are correlated with Onalespib')) +``` diff --git a/03_drug_biomarkers.Rmd b/legacy_code/03_drug_biomarkers_legacy_code.Rmd similarity index 100% rename from 03_drug_biomarkers.Rmd rename to legacy_code/03_drug_biomarkers_legacy_code.Rmd diff --git a/source/00_cNF_helper_code.R b/source/00_cNF_helper_code.R new file mode 
100644 index 0000000..ecea8a4 --- /dev/null +++ b/source/00_cNF_helper_code.R @@ -0,0 +1,68 @@ +#00_cNF_helper_code.R +##standard metadata across all cNFs, including colors if possible + + +library(synapser) +synLogin() +syn <- list(get = synapser::synGet, store = synapser::synStore) +library(readxl) +library(tidyr) +library(dplyr) + +meta1 <- readxl::read_xlsx(syn$get('syn65595365')$path) |> + tidyr::separate(Specimen,into=c('Patient','Tumor'),sep='_',remove = FALSE)|> + dplyr::select(Specimen,Patient,Tumor,aliquot)|> + mutate(cohort=1) + +meta2 <- readxl::read_xlsx(syn$get('syn69920464')$path,sheet='Sheet2')|> + tidyr::separate(Specimen,into=c('Patient','Tumor'),sep='_',remove = FALSE)|> + dplyr::select(Specimen,Patient,Tumor,aliquot='SampleAlias')|> + mutate(cohort=2) + +meta <- rbind(meta1,meta2) %>% + filter(!(cohort == 1 & aliquot %in% c(2, 5, 6))) + + +pcols <- c(NF0017='steelblue',NF0021='orange2',NF0019='orchid4', + NF0022='goldenrod4',NF0018='olivedrab',NF0020='darkred', NF0022='tan', + NF0023='darkgrey',NF0025='lightblue',NF0026='yellow3',NF0027='magenta3', + NF0028='lightgreen',NF0031='pink2') + + + +# This function is used in the notebooks. +# All it does is convert a file from pdf to png so it can be easily displayed after Knitting. +pdf_to_png_if_possible <- function(pdf, dpi = 200) { + if (!file.exists(pdf)) return(NULL) + + png <- sub("\\.pdf$", ".png", pdf, ignore.case = TRUE) + + # Rebuild png if it doesn't exist, or if the pdf is newer + rebuild <- !file.exists(png) || (file.info(pdf)$mtime > file.info(png)$mtime) + + if (rebuild) { + if (requireNamespace("pdftools", quietly = TRUE)) { + out <- pdftools::pdf_convert(pdf, format = "png", dpi = dpi, pages = 1) + if (length(out) && file.exists(out[1])) { + if (file.exists(png)) file.remove(png) + file.rename(out[1], png) + } + } else if (requireNamespace("magick", quietly = TRUE)) { + img <- magick::image_read_pdf(pdf, density = dpi) + magick::image_write(img[1], path = png, format = "png") + } else { + # If no converter is available, just fall back to the PDF. + return(pdf) + } + } + + if (file.exists(png)) png else pdf +} + +# This is used to display plots in the Markdown Notebooks +show_plots <- function(paths, dpi = 200) { + ok <- paths[file.exists(paths)] + if (!length(ok)) return(NULL) + show_paths <- vapply(ok, pdf_to_png_if_possible, FUN.VALUE = character(1), dpi = dpi) + knitr::include_graphics(show_paths) +} diff --git a/source/01_normalize_batchcorrect_omics.R b/source/01_normalize_batchcorrect_omics.R new file mode 100644 index 0000000..f33c04e --- /dev/null +++ b/source/01_normalize_batchcorrect_omics.R @@ -0,0 +1,1021 @@ +# ============================================================================= +# normalize_omics_pipeline.R +# +# Main entry point: +# run_modality(modality, batches, meta, syn, ...) 
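#
# Example call (a sketch; argument values are illustrative and assume `meta`,
# `pcols`, and `syn` come from 00_cNF_helper_code.R and `batches` is defined as
# in the example below):
#   phospho_res <- run_modality("phospho", batches, meta, syn,
#                               out_dir = "data", pcols = pcols,
#                               write_outputs = FALSE)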
+# +# Key inputs (with examples): +# +# Example batches value: +# batches <- list( +# list(syn_id = "syn69963552", cohort = 1, value_start_col = 5, fname_aliquot_index = 8), +# list(syn_id = "syn69947351", cohort = 2, value_start_col = 5, fname_aliquot_index = 9) +# ) +# +# Arguments: +# - modality: Which data type to run: "phospho", "global", or "rna" +# - batches: List of batch configs; each element should include at least: +# * syn_id: Synapse file ID for the wide feature×sample table +# * cohort: Cohort/batch label used for joining meta and for ComBat batching +# Optional per-batch fields: +# * value_start_col: Column index where sample measurement columns begin (auto-detected if NULL) +# * fname_aliquot_index: Token index (split on "_") used to parse aliquot number from sample filenames +# - meta: Sample metadata table used to join batch sample IDs to Patient/Tumor/Specimen +# (cnF_helper_code.R creates this.) +# - syn: Synapse client object used for Synapse reading/upload (synapser) +# - drop_name_substrings: Regex pattern(s); sample columns matching any pattern will be removed (QC/blank runs) +# - out_dir: Output directory for generated CSV/PDF files +# - out_prefix: Default base name for output files; if NULL (recommended), derived from modality (sanitized) +# - upload_parent_id: Synapse project/folder ID to upload outputs into +# (ignored if NULL or write_outputs=FALSE) +# - pcols: Color vector for PCA plotting (names should match Patient IDs) +# (cnF_helper_code.R creates this.) +# - write_outputs: Master toggle to write CSV/PDF outputs and perform uploads +# - save_basename: Override base name used in output filenames (supersedes out_prefix) +# - do_batch_correct: If FALSE, skip ComBat and use combined matrix instead +# ============================================================================= + +suppressPackageStartupMessages({ + library(dplyr) + library(tidyr) + library(stringr) + library(SummarizedExperiment) + library(ggplot2) + library(readr) + library(rlang) +}) + +# Small helpers +modified_zscore <- function(x, na.rm = TRUE) { + # Robust z-score for a numeric vector using median and MAD (less sensitive to + # outliers than mean/SD). If MAD is 0 or NA, returns all zeros. + # Inputs: + # x: numeric vector + # na.rm: TRUE/FALSE; whether to ignore NA when computing median/MAD + # Output: + # numeric vector (same length as x) + m <- suppressWarnings(stats::median(x, na.rm = na.rm)) + md <- suppressWarnings(stats::mad(x, constant = 1, na.rm = na.rm)) + if (is.na(md) || md == 0) return(rep(0, length(x))) + 0.6745 * (x - m) / md # used to convert mean absolute deviation to sd +} + +filter_by_missingness <- function(mat) { + # Filter features (rows) by missingness: keep rows where <= 50% of values are NA. + # Inputs: + # mat: numeric matrix (features x samples) + # Output: + # numeric matrix with a subset of rows retained + keep <- apply(mat, 1, function(r) mean(is.na(r)) <= 0.5) + mat[keep, , drop = FALSE] +} + +union_rows_fill_NA <- function(mats) { + # Align a list of matrices to the union of all rownames (features), filling + # missing feature rows in each matrix with NA. 
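  # Example (illustrative): if mats[[1]] has rows {A,B} and mats[[2]] has rows {B,C},
  #   both returned matrices are reindexed to rows {A,B,C}, with NA wherever a
  #   feature was absent from that batch.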
+ # Inputs: + # mats: list of numeric matrices with rownames + # Output: + # list of numeric matrices, each reindexed to the same union row set + all_feats <- Reduce(union, lapply(mats, rownames)) + lapply(mats, function(m) { + mm <- matrix(NA_real_, nrow = length(all_feats), ncol = ncol(m), + dimnames = list(all_feats, colnames(m))) + mm[rownames(m), colnames(m)] <- m + mm + }) +} + +collapse_duplicate_features <- function(mat) { + # Collapse duplicate feature IDs (duplicate rownames) by summing values across + # duplicates for each sample (NA treated as 0 for summation). + # Inputs: + # mat: numeric matrix (features x samples) with rownames as feature IDs + # Output: + # numeric matrix with unique rownames (duplicates collapsed) + if (!any(duplicated(rownames(mat)))) return(mat) + grp <- split(seq_len(nrow(mat)), rownames(mat)) + collapsed <- do.call(rbind, lapply(grp, function(ix) colSums(mat[ix, , drop = FALSE], na.rm = TRUE))) + rownames(collapsed) <- names(grp) + collapsed +} + +make_dropper <- function(substrings) { + # Build a function that flags sample column names to drop based on one or more + # regex patterns. Useed for removing protocol optimization runs. + # Inputs: + # substrings: NULL or character vector of regex patterns + # Output: + # function(x): logical vector; TRUE means "drop this name" + if (is.null(substrings) || length(substrings) == 0) return(function(x) rep(FALSE, length(x))) + pattern <- paste0(substrings, collapse = "|") + function(x) grepl(pattern, x, fixed = FALSE) +} + +# Functions to clean up irregular names +# Extract the filename portion from a path or portion of path (drops directories). +# Inputs: +# x: character vector of file paths +# Output: +# character vector of basenames + +# NOTE: This is helpful because some "sample columns" are full Windows paths. + +basename_only <- function(x) sub("^.*[\\\\/]", "", x) +basename_no_ext <- function(x) sub("\\.[^.]+$", "", basename_only(x)) + +normalize_specimen_like <- function(x) { + # Normalize specimen strings to a consistent form for joining (lowercase, + # remove whitespace, unify separators, normalize organoid/tissue/skin labels). + # Inputs: + # x: character vector + # Output: + # character vector of normalized specimen-like strings + + # This normalization exists because different sources encode specimen IDs + # in slightly different ways (e.g., "MN-2_T1_organoid" vs "MN.2 T1 Organoids"). + # The goal is to make joins resilient to punctuation/spacing differences. + y <- tolower(x) + y <- gsub("\\s+", "", y) + y <- gsub("\\.", "-", y) + y <- gsub("_", "-", y) + y <- gsub("organoids?$", "organoid", y) + y <- gsub("-organoids?-", "-organoid-", y) + y <- gsub("skin$", "skin", y) + y <- gsub("tissues?$", "tissue", y) + y <- gsub("--+", "-", y) + y <- gsub("^-|-$", "", y) + y +} + +parse_rna_header_triplet <- function(fnames) { + # Parse RNA sample headers expected to look like "sample.T1.condition" (dot-delimited). + # Extracts sample_id, optional tumor (T#), and condition; also builds a normalized + # specimen key to help join against metadata. 
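  # Example (hypothetical header): "NF0017.T2.organoid" parses to sample_id = "NF0017",
  #   tumor = "T2", condition_norm = "organoid", and specimen_norm = "NF0017_T2_organoid".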
+ # Inputs: + # fnames: character vector of RNA sample column names + # Output: + # data.frame with columns: fname, sample_id, tumor, condition_raw, condition_norm, specimen_norm + + #RNA headers don't have aliquot (unlike global and phospho) + toks_list <- strsplit(fnames, "\\.") + out <- lapply(seq_along(toks_list), function(i) { + toks <- toks_list[[i]] + sample_id <- if (length(toks) >= 1) toks[[1]] else NA_character_ + + tumor <- NA_character_ + condition_raw <- NA_character_ + + if (length(toks) >= 2) { + if (grepl("^T\\d+$", toks[[2]], ignore.case = TRUE)) { + tumor <- toupper(toks[[2]]) + cond_tokens <- toks[-c(1,2)] + condition_raw <- if (length(cond_tokens)) paste(cond_tokens, collapse = ".") else NA_character_ + } else { + cond_tokens <- toks[-1] + condition_raw <- if (length(cond_tokens)) paste(cond_tokens, collapse = ".") else NA_character_ + } + } + + condition_norm <- condition_raw + if (!is.na(condition_norm)) { + low <- tolower(condition_norm) + if (grepl("^organoids?$", low)) condition_norm <- "organoid" + else if (grepl("^tissues?$", low)) condition_norm <- "tissue" + else if (grepl("^skin$", low)) condition_norm <- "skin" + } + + data.frame( + fname = fnames[i], + sample_id = sample_id, + tumor = ifelse(is.na(tumor), NA_character_, toupper(tumor)), + condition_raw = condition_raw, + condition_norm= condition_norm, + stringsAsFactors = FALSE + ) + }) + df <- do.call(rbind, out) + df$specimen_norm <- with(df, { + sid <- sample_id + tmr <- ifelse(is.na(tumor) | tumor == "", "", paste0("_", tumor)) + cnd <- ifelse(is.na(condition_norm) | condition_norm == "", "", paste0("_", condition_norm)) + paste0(sid, tmr, cnd) + }) + df +} + +# Functions to get data from Synapse +read_wide_from_synapse <- function(syn, syn_id) { + # Download a Synapse file and read it as a wide tab-delimited table (features x samples). + # Inputs: + # syn: Synapse client object (synapser) + # syn_id: Synapse file ID (e.g., "syn69963552") + # Output: + # data.frame containing the wide table (annotation columns + sample columns) + message(" Reading Synapse file: ", syn_id) + df <- read.table( + syn$get(syn_id)$path, + sep = "\t", header = TRUE, quote = '"', + fill = TRUE, check.names = FALSE + ) + message(sprintf(" - Read %d rows × %d cols; first cols: %s", + nrow(df), ncol(df), paste(head(colnames(df), 8), collapse = ", "))) + df +} + +detect_value_start_col <- function(wide_df, fallback = 5) { + # Auto-detect where sample measurement columns start in a wide table by looking for + # headers that resemble file paths or RAW/mzML names. Falls back if not found. + # Inputs: + # wide_df: data.frame (wide) + # fallback: integer column index to use if auto-detection fails + # Output: + # integer column index for the first sample column + nms <- colnames(wide_df) + is_pathy <- grepl("\\.(raw|mzml)$", nms, ignore.case = TRUE) | + grepl("[/\\\\]", nms) | + grepl("^[A-Za-z]:\\\\", nms) + if (any(is_pathy)) { + i <- which(is_pathy)[1] + message(" Auto-detected first sample column at index ", i, " to '", nms[i], "'") + return(i) + } + message(" Did not detect path/RAW headers; using fallback value_start_col=", fallback) + fallback +} + +parse_fnames <- function(fnames, aliquot_field_index, cohort) { + # Parse sample column names into (fname, aliquot, cohort). Attempts to extract aliquot + # from a specific underscore token index; otherwise tries the last numeric token. 
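  # Example: with aliquot_field_index = 8 (cohort 1 in the header example), the eighth
  #   "_"-delimited token of each sample column name is tried first as the numeric
  #   aliquot (cohort 2 uses index 9); if that token is not numeric, the last numeric
  #   token in the name is used instead.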
+ # Inputs: + # fnames: character vector of sample column names + # aliquot_field_index: integer token index (split on "_") or NULL + # cohort: cohort label to attach to all parsed samples + # Output: + # data.frame with columns: fname, aliquot (numeric or NA), cohort + message("Parsing filenames to (fname, aliquot, cohort)") + rows <- lapply(fnames, function(fname) { + toks <- strsplit(fname, "_", fixed = TRUE)[[1]] + aliq <- NA_real_ + + if (!is.null(aliquot_field_index) && + aliquot_field_index >= 1 && + aliquot_field_index <= length(toks)) { + aliq_try <- suppressWarnings(as.double(toks[[aliquot_field_index]])) + if (!is.na(aliq_try)) aliq <- aliq_try + } + + if (is.na(aliq)) { + num_tokens <- suppressWarnings(as.double(toks)) + if (any(!is.na(num_tokens))) aliq <- tail(num_tokens[!is.na(num_tokens)], 1) + } + + data.frame( + fname = fname, + aliquot = aliq, + cohort = cohort, + stringsAsFactors = FALSE + ) + }) + out <- do.call(rbind, rows) + message(sprintf(" - Parsed %d samples (aliquot NA: %d)", + nrow(out), sum(is.na(out$aliquot)))) + out +} + +##### +#Feature ID builders +##### +build_phospho_ids <- function(df) { + # Build unique phosphosite feature IDs from phospho annotation columns. + # Inputs: + # df: data.frame containing at least Gene.Names, Residue, Site + # Output: + # character vector of feature IDs (one per row) + lsite <- tolower(df$Residue) + paste0(df$`Gene.Names`, "-", df$Residue, df$Site, lsite) +} + +# Build global proteomics feature IDs (gene symbols). +# Inputs: +# df: data.frame containing a Genes column +# Output: +# character vector of feature IDs (one per row) +build_global_ids <- function(df) as.character(df$Genes) + +build_rna_ids <- function(df) { + # Build RNA feature IDs by selecting a gene identifier column (tries common names + # like gene_id, gene_name, Symbol, Ensembl). Errors if none found. + # Inputs: + # df: data.frame containing a recognized gene ID column + # Output: + # character vector of feature IDs (one per row) + cand <- c("gene_id","Gene","gene","gene_name","Symbol","symbol","ENSEMBL","Ensembl","ensembl_gene_id") + hit <- cand[cand %in% names(df)] + if (length(hit) == 0) stop("RNA feature-id column not found.") + if ("gene_id" %in% hit) return(as.character(df[["gene_id"]])) + if ("gene_name" %in% hit) return(as.character(df[["gene_name"]])) + as.character(df[[hit[[1]]]]) +} +pick_builder <- function(modality) { + # Choose the correct feature-ID builder function based on modality. + # Inputs: + # modality: "phospho", "global", or "rna" (case-insensitive) + # Output: + # function(df) -> character vector of feature IDs + m <- tolower(modality) + if (m == "phospho") return(build_phospho_ids) + if (m == "global") return(build_global_ids) + if (m == "rna") return(build_rna_ids) + stop("Unknown modality: ", modality) +} + +# Functions to Normalize Data. Uses SummarizedExperiment + +coldata_tbl <- function(se) { + # Convert SummarizedExperiment colData into a clean data.frame with consistent + # filename fields (fname, basename, stem). Avoids name collisions. 
+ # Inputs: + # se: SummarizedExperiment + # Output: + # data.frame of sample metadata; includes fname, fname_base, fname_stem + cd <- as.data.frame(SummarizedExperiment::colData(se), stringsAsFactors = FALSE) + if ("fname" %in% names(cd)) names(cd)[names(cd) == "fname"] <- ".coldata_fname" + names(cd) <- make.unique(names(cd), sep = "_") + cd$fname <- rownames(cd) + cd$fname_base <- basename_only(cd$fname) + cd$fname_stem <- basename_no_ext(cd$fname) + cd <- cd[, c("fname","fname_base","fname_stem", setdiff(names(cd), c("fname","fname_base","fname_stem"))), drop = FALSE] + cd +} + +looks_like_sample_header <- function(x) { + # Heuristic test for whether a column name looks like a raw file/sample path + # (e.g., contains slashes or ends in .raw/.mzml). + # Inputs: + # x: character vector of column names + # Output: + # logical vector; TRUE indicates "looks like a sample header" + # Example file: + # "I:\UserData\LeDay\Piehowski_orgonoids_Feb25\RawData\1338241_cNF_organoid_DIA_P_01_29Jan25_Ned_BEHCoA-25-01-02.raw" + grepl("\\.(raw|mzml)$", x, ignore.case = TRUE) | + grepl("[/\\\\]", x) | + grepl("^[A-Za-z]:\\\\", x) +} + +make_se <- function(wide_df, value_start_col, feature_ids, fnames_df, meta, drop_name, modality) { + # Build a SummarizedExperiment from a wide feature x sample table: + # - selects sample columns + # - converts values to numeric + # - drops unwanted sample columns by name pattern + # - attaches sample metadata by joining on (aliquot, cohort) + # - applies extra RNA-specific parsing and metadata reconciliation + # Inputs: + # wide_df: wide data.frame (features + sample columns) + # value_start_col: integer index of first sample column + # feature_ids: character vector of feature IDs (length = nrow(wide_df)) + # fnames_df: data.frame mapping fname->aliquot/cohort (from parse_fnames) + # meta: metadata table used to map aliquot/cohort to Patient/Tumor/Specimen + # drop_name: function(x)->logical; TRUE means drop that sample column + # modality: "phospho", "global", or "rna" + # Output: + # SummarizedExperiment with assay "values" and populated colData/rowData + all_candidate <- colnames(wide_df)[value_start_col:ncol(wide_df)] + has_pathy <- any(looks_like_sample_header(all_candidate)) # Use only path-like columns as sample measurements. + if (has_pathy) { + sample_cols <- all_candidate[looks_like_sample_header(all_candidate)] + sample_cols <- setdiff(sample_cols, c("Site", "Sequence")) + } else { + sample_cols <- setdiff(all_candidate, c("Site", "Sequence")) + } + + # message(" Candidate sample columns (first 6):") + # print(utils::head(sample_cols, 6)) + + message(" Casting measurement block to numeric") + raw_block <- wide_df[, sample_cols, drop = FALSE] + # Clean vals, coerce to numeric, keep original names. + clean_block <- as.data.frame( + lapply(raw_block, function(col) { + if (is.factor(col)) col <- as.character(col) + col[col %in% c("", "NA", "NaN", "na", "n/a", "NULL")] <- NA + suppressWarnings(as.numeric(col)) + }), + check.names = FALSE + ) + # Drop QC/blank samples by regex name patterns. + keep <- !drop_name(colnames(clean_block)) + if (any(!keep)) { + message(" Dropping unwanted sample columns by pattern: ", sum(!keep)) + clean_block <- clean_block[, keep, drop = FALSE] + sample_cols <- sample_cols[keep] + } + + mat <- as.matrix(clean_block) + rownames(mat) <- feature_ids + colnames(mat) <- sample_cols + # Sync metadata rows to kept sample columns. 
+ fnames_df <- fnames_df %>% dplyr::semi_join(data.frame(fname = sample_cols), by = "fname") + + parsed_df <- if (tolower(modality) == "rna") parse_rna_header_triplet(fnames_df$fname) else + data.frame(fname = fnames_df$fname, stringsAsFactors = FALSE) + + message(" Joining sample meta (by aliquot & cohort)") + meta_join <- meta + if ("fname" %in% names(meta_join)) { + message(" ! Debug: Dropping 'fname' column from 'meta' to prevent duplication") + meta_join <- dplyr::select(meta_join, -fname) + } + + cdata <- fnames_df %>% + dplyr::left_join(meta_join, by = c("aliquot","cohort")) %>% + dplyr::left_join(parsed_df, by = "fname") %>% + dplyr::mutate(cohort = as.factor(cohort), + cohort_key = as.character(cohort)) + + if (!"aliquot" %in% names(cdata)) cdata$aliquot <- NA_real_ + if ("aliquot.x" %in% names(cdata) || "aliquot.y" %in% names(cdata)) { + cdata$aliquot <- dplyr::coalesce(cdata$aliquot, cdata$aliquot.x, cdata$aliquot.y) + cdata <- dplyr::select(cdata, -dplyr::any_of(c("aliquot.x","aliquot.y"))) + } + + if (tolower(modality) == "rna") { + meta_norm <- meta %>% + dplyr::mutate( + Specimen_norm = normalize_specimen_like(Specimen), + cohort_key = as.character(cohort) + ) %>% + dplyr::select(Specimen, Specimen_norm, Patient, Tumor, cohort_key) + + cdata2 <- cdata %>% + dplyr::mutate(fname_norm = normalize_specimen_like(ifelse( + is.na(specimen_norm) | specimen_norm == "", fname, specimen_norm + ))) %>% + dplyr::left_join(meta_norm, by = c("cohort_key", "fname_norm" = "Specimen_norm")) + + for (nm in c("Specimen","Patient","Tumor")) { + if (!nm %in% names(cdata)) cdata[[nm]] <- NA + cdata[[nm]] <- dplyr::coalesce(cdata[[nm]], cdata2[[nm]]) + } + + if (!"Specimen" %in% names(cdata)) cdata$Specimen <- NA_character_ + if (!"Patient" %in% names(cdata)) cdata$Patient <- NA_character_ + if (!"Tumor" %in% names(cdata)) cdata$Tumor <- NA_character_ + + cdata$Patient <- dplyr::coalesce(cdata$Patient, cdata$sample_id) + cdata$Tumor <- dplyr::coalesce(cdata$Tumor, cdata$tumor) + + make_specimen <- function(pid, tmr, cond) { + pid_clean <- pid + tmr_clean <- ifelse(is.na(tmr) | tmr == "", "", paste0("_", tmr)) + cond_clean <- ifelse(is.na(cond) | cond == "", "", paste0("_", cond)) + paste0(pid_clean, tmr_clean, cond_clean) + } + need_spec <- is.na(cdata$Specimen) | cdata$Specimen == "" + if (any(need_spec)) { + cdata$Specimen[need_spec] <- make_specimen( + pid = cdata$Patient[need_spec], + tmr = cdata$Tumor[need_spec], + cond = cdata$condition_norm[need_spec] + ) + } + } + + message(" - Example cdata rows:") + # print(utils::head(cdata[, intersect(c( + # "fname","aliquot","cohort","Specimen","Patient","Tumor", + # "sample_id","tumor","condition_raw","condition_norm","specimen_norm" + # ), names(cdata)), drop = FALSE], 10)) + + rn <- cdata$fname + cdata_nofname <- dplyr::select(cdata, -fname) + + rd <- S4Vectors::DataFrame(feature_id = rownames(mat)); rownames(rd) <- rd$feature_id + + se <- SummarizedExperiment::SummarizedExperiment( + assays = S4Vectors::SimpleList(values = mat), + rowData = rd, + colData = S4Vectors::DataFrame(cdata_nofname, row.names = rn) + ) + + message(sprintf(" - SE assay dims: %d feats × %d samples", nrow(se), ncol(se))) + message(sprintf(" - colData names: %s", paste(names(SummarizedExperiment::colData(se)), collapse = ", "))) + se +} + +scale_columns_modified_z <- function(m) { + # Apply modified_zscore() to each column of a matrix (per-sample robust scaling). 
+ # Inputs: + # m: numeric matrix (features x samples) + # Output: + # numeric matrix of same dimensions (column-wise robust z-scored) + out <- m + for (j in seq_len(ncol(m))) out[, j] <- modified_zscore(m[, j]) + out +} + +normalize_by_modality <- function(se, modality) { + # Normalize a SummarizedExperiment assay using modality-specific transforms: + # phospho: 0->NA, filter missingness, log2(x+0.01), robust zscore + # global: log2(x), robust zscore + # rna: filter missingness, log2(x+1), robust zscore + # Also collapses duplicated feature IDs. + # Inputs: + # se: SummarizedExperiment with assay "values" + # modality: "phospho", "global", or "rna" + # Output: + # SummarizedExperiment with normalized assay "values" + mtype <- tolower(modality) + mat0 <- as.matrix(SummarizedExperiment::assay(se, "values")) + c0 <- colnames(mat0) + + message(" Normalizing modality = ", modality) + if (mtype == "phospho") { + mat0[mat0 == 0] <- NA_real_ + mat1 <- filter_by_missingness(mat0) + mlog <- log2(mat1 + 0.01) + mat2 <- scale_columns_modified_z(mlog) + } else if (mtype == "global") { + mlog <- log2(mat0) + mat2 <- scale_columns_modified_z(mlog) + } else if (mtype == "rna") { + mat1 <- filter_by_missingness(mat0) + mlog <- log2(mat1 + 1) + mat2 <- scale_columns_modified_z(mlog) + } else { + abort(paste0("Unknown modality: ", modality)) + } + + mat3 <- collapse_duplicate_features(mat2) + stopifnot(identical(colnames(mat3), c0)) + + rd <- S4Vectors::DataFrame(feature_id = rownames(mat3)); rownames(rd) <- rd$feature_id + se_out <- SummarizedExperiment::SummarizedExperiment( + assays = S4Vectors::SimpleList(values = mat3), + rowData = rd, + colData = SummarizedExperiment::colData(se)[colnames(mat3), , drop = FALSE] + ) + message(sprintf(" - Normalized assay dims: %d feats × %d samples", nrow(se_out), ncol(se_out))) + se_out +} + +# Function to Combine Data (batches) + +combine_batches_intersection <- function(se_list) { + # Combine multiple normalized batches by intersecting shared features (rownames), + # then concatenating samples (cbind). Also stacks colData. + # Inputs: + # se_list: list of SummarizedExperiment objects (normalized) + # Output: + # SummarizedExperiment containing combined matrix and combined colData + message("Combining batches (intersection of features, then cbind samples)") + mats <- lapply(se_list, function(se) as.matrix(SummarizedExperiment::assay(se, "values"))) + # Keep shared features across batches for cbind. 
+ feats <- Reduce(intersect, lapply(mats, rownames)) + feats <- feats[!is.na(feats) & feats != ""] + message(" - Intersection feature count: ", length(feats)) + if (length(feats) == 0) stop("No common features across batches after cleaning feature IDs.") + matsI <- lapply(seq_along(mats), function(i) { + m <- mats[[i]][feats, , drop = FALSE] + message(sprintf(" Batch %d: %d feats × %d samples after intersect", i, nrow(m), ncol(m))) + m + }) + matCB <- do.call(cbind, matsI) + + cd <- do.call(S4Vectors::rbind, lapply(se_list, SummarizedExperiment::colData)) + rd <- S4Vectors::DataFrame(feature_id = rownames(matCB)); rownames(rd) <- rd$feature_id + + se <- SummarizedExperiment( + assays = S4Vectors::SimpleList(values = matCB), + rowData = rd, + colData = cd + ) + message(sprintf(" - Combined assay dims: %d feats × %d samples", nrow(se), ncol(se))) + message(" - Head(sample names) in combined assay:") + # print(utils::head(colnames(SummarizedExperiment::assay(se, "values")), 6)) + message(" - Head(rownames) in combined colData (should match):") + # print(utils::head(rownames(SummarizedExperiment::colData(se)), 6)) + invisible(se) +} + +# Combat Function. (Lots of messages to help debug) + +combat_by_cohort <- function(se) { + # Batch-correct the combined matrix using ComBat (sva) with colData$cohort as the + # batch variable. Replaces non-finite values with 0 before correction. + # Inputs: + # se: SummarizedExperiment with assay "values" and colData column "cohort" + # Output: + # SummarizedExperiment with batch-corrected assay "values" + message("Running ComBat by cohort (batch-only; mean.only = FALSE)") + suppressPackageStartupMessages(library(sva)) + + mat <- as.matrix(SummarizedExperiment::assay(se, "values")) + message(sprintf("Matrix dims before ComBat: %d features × %d samples", nrow(mat), ncol(mat))) + + n_bad <- sum(!is.finite(mat)) + if (n_bad > 0) message(" Replacing ", n_bad, " non-finite values with 0.") + mat[!is.finite(mat)] <- 0 + + cd <- as.data.frame(SummarizedExperiment::colData(se)) + # Align colData order to matrix column order. + cd <- cd[colnames(mat), , drop = FALSE] + if (!"cohort" %in% names(cd)) stop("colData must contain 'cohort' for ComBat batching.") + + batch <- droplevels(as.factor(cd$cohort)) + # message(" Batch table (pre-drop):"); print(table(batch, useNA = "ifany")) + + keep <- !is.na(batch) + if (any(!keep)) { + message(" Dropping ", sum(!keep), " samples with NA cohort before ComBat.") + mat <- mat[, keep, drop = FALSE] + batch <- droplevels(batch[keep]) + cd <- cd[keep, , drop = FALSE] + } + + # message(" Final check — ncol(mat)=", ncol(mat), "; length(batch)=", length(batch)) + # message(" Batch table (final):"); print(table(batch, useNA = "ifany")) + + pre_by_cohort <- tapply(colMeans(mat), batch, sd) + # message(" Pre-ComBat: SD of column means by cohort:"); print(pre_by_cohort) + + cb <- sva::ComBat(dat = mat, batch = batch, mean.only = FALSE, par.prior = TRUE) + + post_by_cohort <- tapply(colMeans(cb), batch, sd) + # message(" Post-ComBat: SD of column means by cohort:"); print(post_by_cohort) + + SummarizedExperiment::assay(se, "values") <- cb + # message(" Matrix dims after ComBat: ", nrow(cb), " × ", ncol(cb)) + invisible(se) +} + +# Plot Functions (PCA) + more debug messages + +se_to_long <- function(se, modality) { + # Convert a SummarizedExperiment matrix into long format (one row per feature-sample + # pair) and join sample metadata from colData. 
+ # Inputs: + # se: SummarizedExperiment with assay "values" + # modality: "phospho", "global", or "rna" (controls feature column name) + # Output: + # data.frame in long format with correctedAbundance + sample metadata columns + feature_col <- if (tolower(modality) == "global") "Gene" else "feature_id" + + avals <- as.data.frame(SummarizedExperiment::assay(se, "values"), check.names = FALSE) + avals[[feature_col]] <- rownames(avals) + + long <- avals |> + tidyr::pivot_longer(cols = -all_of(feature_col), names_to = "fname", values_to = "correctedAbundance") + + cd <- coldata_tbl(se) + + if (!"aliquot" %in% names(cd)) cd$aliquot <- NA_real_ + if ("aliquot.x" %in% names(cd) || "aliquot.y" %in% names(cd)) { + cd$aliquot <- dplyr::coalesce(cd$aliquot, cd$aliquot.x, cd$aliquot.y) + } + + want <- c("fname","aliquot","cohort","Specimen","Patient","Tumor", + "fname_base","fname_stem", + "sample_id","tumor","condition_raw","condition_norm","specimen_norm") + have <- intersect(want, names(cd)) + long1 <- dplyr::left_join(long, cd[, have, drop = FALSE], by = "fname") + + message(" - se_to_long(): non-NA counts to Patient=", + sum(!is.na(long1$Patient)), "; Tumor=", sum(!is.na(long1$Tumor)), + "; Specimen=", sum(!is.na(long1$Specimen)), "; cohort=", sum(!is.na(long1$cohort))) + distinct(long1) +} + +pca_df_present_in_all <- function(se) { + # Prepare a PCA data.frame using only features that are complete (finite) across + # all samples. Joins PCA scores with sample metadata for plotting. + # Inputs: + # se: SummarizedExperiment with assay "values" + # Output: + # data.frame with PC1/PC2 and metadata columns (Patient/Tumor/Specimen/cohort, etc.) + message("Preparing PCA (features present in ALL samples)") + mat <- as.matrix(SummarizedExperiment::assay(se, "values")) + + keep_rows <- apply(mat, 1, function(r) all(is.finite(r))) + n_keep <- sum(keep_rows); n_all <- nrow(mat) + message(" - Kept ", n_keep, " / ", n_all, " features with complete data for PCA") + if (n_keep < 2) stop("Too few complete features for PCA after intersection filter.") + + pcs <- prcomp(t(mat[keep_rows, , drop = FALSE])) + + cd <- coldata_tbl(se) + df <- as.data.frame(pcs$x[, 1:2, drop = FALSE]) + df$fname <- rownames(df) + + df1 <- dplyr::left_join(df, cd, by = "fname") + + message(" - colData columns present: ", paste(setdiff(names(cd), c("fname","fname_base","fname_stem")), collapse = ", ")) + message(" - Non-NA counts in colData: Patient=", sum(!is.na(cd$Patient)), + "; Tumor=", sum(!is.na(cd$Tumor)), "; Specimen=", sum(!is.na(cd$Specimen)), + "; cohort=", sum(!is.na(cd$cohort))) + message(" - After join: n rows = ", nrow(df1)) + message(" - Non-NA counts after join: Patient=", sum(!is.na(df1$Patient)), + "; Tumor=", sum(!is.na(df1$Tumor)), "; Specimen=", sum(!is.na(df1$Specimen)), + "; cohort=", sum(!is.na(df1$cohort))) + + if (sum(!is.na(df1$Patient)) == 0) { + message(" ! Warning: Patient is NA for all samples after join. 
Will color/shape by cohort.") + df1$Patient_fallback <- as.character(df1$cohort) + df1$Tumor_fallback <- as.character(df1$cohort) + # message(" - DEBUG: head(df1$fname):"); print(utils::head(df1$fname, 6)) + # message(" - DEBUG: head(cd$fname):"); print(utils::head(cd$fname, 6)) + } + + if ("Specimen" %in% names(df1)) { + df1$Tumor <- stringr::str_remove(stringr::str_extract(df1$Specimen, "_T\\d+"), "^_") + } + + df1 +} + +plot_pca <- function(pc_df, title_text, pcols = NULL) { + # Create a PCA scatter plot (PC1 vs PC2), choosing a sensible color/shape mapping + # based on available metadata (prefers Patient/Tumor; falls back to cohort/condition). + # Inputs: + # pc_df: data.frame returned by pca_df_present_in_all() + # title_text: plot title string + # pcols: optional named vector of colors for Patient values + # Output: + # ggplot object (PCA scatter) + color_col <- if ("Patient" %in% names(pc_df) && any(!is.na(pc_df$Patient))) { + "Patient" + } else if ("condition_norm" %in% names(pc_df) && any(!is.na(pc_df$condition_norm))) { + "condition_norm" + } else if ("tumor" %in% names(pc_df) && any(!is.na(pc_df$tumor))) { + "tumor" + } else { + "Patient_fallback" + } + + shape_col <- if ("Tumor" %in% names(pc_df) && any(!is.na(pc_df$Tumor))) { + "Tumor" + } else if ("tumor" %in% names(pc_df) && any(!is.na(pc_df$tumor))) { + "tumor" + } else { + "Tumor_fallback" + } + + g <- ggplot(pc_df, aes(PC1, PC2, col = .data[[color_col]])) + + geom_point(aes(shape = .data[[shape_col]]), size = 3) + + labs(title = title_text, color = color_col, shape = shape_col) + + theme_bw() + if (!is.null(pcols) && color_col == "Patient") g <- g + scale_color_manual(values = pcols) + print(g) + g +} + +plot_hist <- function(se, title_text) { + # Plot a histogram of all assay values, filled by cohort, to visualize distributions + # (e.g., pre- vs post-ComBat). + # Inputs: + # se: SummarizedExperiment with assay "values" + # title_text: plot title string + # Output: + # ggplot object (histogram) + cd <- coldata_tbl(se) + df <- as.data.frame(SummarizedExperiment::assay(se, "values")) |> + tidyr::pivot_longer(everything(), names_to = "fname", values_to = "val") |> + dplyr::left_join(cd[, c("fname","cohort")], by = "fname") + g <- ggplot(df, aes(x = val, fill = as.factor(cohort))) + + geom_histogram(bins = 60, alpha = 0.9) + + labs(title = title_text, x = "Value", fill = "Cohort") + + theme_bw() + print(g) + g +} + +# Upload function + +perform_uploads <- function(paths, syn, parent_id) { + # Upload a set of local output files to a Synapse folder/project using syn$store(). 
+ # Inputs: + # paths: character vector of local file paths + # syn: Synapse client object (synapser) + # parent_id: Synapse folder/project ID to store into + # Output: + # invisible(NULL); side-effect is file uploads to Synapse + if (is.null(parent_id) || length(paths) == 0) return(invisible(NULL)) + message("All steps succeeded — uploading ", length(paths), " file(s) to Synapse…") + for (p in paths) { + fullp <- normalizePath(p, winslash = "/", mustWork = FALSE) + message(" Uploading: ", basename(p), " (", fullp, ")") + f <- syn$store(synapser::File(p, parentId = parent_id)) + message(" Uploaded: ", basename(p), " (local: ", fullp, ") to Synapse ID: ", f$properties$id) + } + message("Uploads complete.") +} + +##### +# Main entry +##### +# This is how we call the function / pipeline + +# Example batches value: +# batches <- list( +# list(syn_id = "syn69963552", cohort = 1, value_start_col = 5, fname_aliquot_index = 8), +# list(syn_id = "syn69947351", cohort = 2, value_start_col = 5, fname_aliquot_index = 9) +# ) + + +run_modality <- function( + # End-to-end normalization pipeline for one modality across one or more batches: + # - read wide tables from Synapse + # - build feature IDs + construct SummarizedExperiment per batch + # - modality-specific normalization per batch + # - combine batches on shared feature intersection + # - QC plots (PCA + hist) pre and post + # - optional ComBat batch correction by cohort + # - export long CSVs and optional Synapse uploads + # Inputs: + # modality: "phospho", "global", or "rna" + # batches: list of batch configs (syn_id, cohort, optional parsing hints) + # meta: sample metadata table for joining + # syn: Synapse client object + # drop_name_substrings: optional regex patterns to drop sample columns + # out_dir: directory for outputs + # out_prefix: base name for outputs (defaults from modality) + # upload_parent_id: Synapse folder/project ID for uploads (optional) + # pcols: optional named color vector for Patient PCA coloring + # write_outputs: TRUE/FALSE to write CSV/PDF and upload + # save_basename: override base output stem + # do_batch_correct: TRUE/FALSE to run ComBat + # Output: + # list containing SE objects, long tables, PCA data, plot objects, and written file paths + modality, # Which data type to run: "phospho", "global", or "rna" + batches, # List of batch configs (each element should include at least: syn_id, cohort; optionally: value_start_col, fname_aliquot_index) + meta, # Sample metadata table used to join batch sample IDs to Patient/Tumor/Specimen - cnF_helper_code.R creates this. + syn, # Synapse client object used for synapse reading/upload (synapser) + drop_name_substrings = NULL, # Regex pattern - sample columns matching any pattern will be removed (QC/blank runs) + out_dir = ".", # Output directory for generated CSV/PDF files + out_prefix = NULL, # Default base name for output files; if NULL (recommended), derived from modality (sanitized) + upload_parent_id = NULL, # Synapse project ID to upload outputs into (ignored if NULL or write_outputs=FALSE) + pcols = NULL, # Color vector for PCA plotting (names should match Patient IDs) - cnF_helper_code.R creates this. 
+ write_outputs = TRUE, # master toggle to write CSV/PDF & upload + save_basename = NULL, # override base name used in output files + do_batch_correct = TRUE # if FALSE, skip ComBat and use combined matrix +) { + message("==================================================") + message("Starting run_modality(): ", modality) + message("Output directory: ", normalizePath(out_dir, winslash = "/", mustWork = FALSE)) + if (!dir.exists(out_dir)) dir.create(out_dir, recursive = TRUE) + if (is.null(out_prefix)) out_prefix <- gsub("[^A-Za-z0-9]+", "_", tolower(modality)) + file_stem <- if (!is.null(save_basename) && nzchar(save_basename)) save_basename else out_prefix + + upload_queue <- character(0) + results <- NULL + + tryCatch({ + drop_name <- make_dropper(drop_name_substrings) + build_ids <- pick_builder(modality) + + # ---- Per-batch normalization ------------------------------------------------ + se_list <- vector("list", length(batches)) + for (i in seq_along(batches)) { + message("--------------------------------------------------") + message("Batch ", i, " of ", length(batches)) + b <- batches[[i]] + + wide <- read_wide_from_synapse(syn, b$syn_id) + + if (tolower(modality) == "phospho") { + message(" Preprocessing phospho table: drop blank Gene.Names; build site ids") + wide <- wide %>% dplyr::filter(!is.na(.data$Gene.Names), .data$Gene.Names != "") + } else if (tolower(modality) == "global") { + message(" Preprocessing global table: split multi-symbol rows (Genes by ';')") + wide <- tidyr::separate_rows(wide, Genes, sep = ";") |> + dplyr::mutate(Genes = trimws(Genes)) |> + dplyr::filter(!is.na(Genes) & Genes != "") + } else if (tolower(modality) == "rna") { + message(" Preprocessing RNA table: (using merged Salmon matrix; samples start after gene_id/gene_name)") + } + + feats <- build_ids(wide) + ok <- !is.na(feats) & feats != "" + if (!all(ok)) { + message(" Dropping ", sum(!ok), " empty/NA feature IDs before SE construction.") + wide <- wide[ok, , drop = FALSE] + feats <- feats[ok] + } + + fallback_col <- if (tolower(modality) == "rna") 3 else 5 + first_col <- if (!is.null(b$value_start_col)) b$value_start_col else detect_value_start_col(wide, fallback = fallback_col) + + fnmap <- parse_fnames(colnames(wide)[first_col:ncol(wide)], b$fname_aliquot_index, b$cohort) + if (tolower(modality) == "rna" && any(is.na(fnmap$aliquot))) { + message(" ! 
Note: ", sum(is.na(fnmap$aliquot)), " RNA sample(s) with NA aliquot after parsing (expected for RNA headers).") + } + + se0 <- make_se(wide, value_start_col = first_col, feature_ids = feats, + fnames_df = fnmap, meta = meta, drop_name = drop_name, modality = modality) + se_n <- normalize_by_modality(se0, modality) + se_list[[i]] <- se_n + + if (write_outputs) { + message(" Writing per-batch normalized long table (pre-ComBat)") + batch_long <- se_to_long(se_n, modality) + batch_tag <- paste0("batch", b$cohort) + batch_path <- file.path(out_dir, paste0(file_stem, "_", batch_tag, "_normalized_long.csv")) + readr::write_csv(batch_long, batch_path) + upload_queue <- c(upload_queue, batch_path) + } + } + + # Combine & pre-QC + message("--------------------------------------------------") + se_combined <- combine_batches_intersection(se_list) + + message(" Pre-QC plots (PCA & histogram) on combined (pre-ComBat)") + pre_pc_df <- pca_df_present_in_all(se_combined) + pre_pca <- plot_pca(pre_pc_df, paste0(modality, " samples"), pcols = pcols) + pre_hist <- plot_hist(se_combined, paste0(modality, ": value distribution (pre-ComBat)")) + if (write_outputs) { + pre_pca_pdf <- file.path(out_dir, paste0(file_stem, "_preComBat_PCA.pdf")) + pre_hist_pdf <- file.path(out_dir, paste0(file_stem, "_preComBat_Hist.pdf")) + ggsave(pre_pca_pdf, pre_pca, width = 7, height = 4.5, device = cairo_pdf) + ggsave(pre_hist_pdf, pre_hist, width = 7, height = 4.5, device = cairo_pdf) + upload_queue <- c(upload_queue, pre_pca_pdf, pre_hist_pdf) + } + + # ComBat + if (isTRUE(do_batch_correct)) { + message("--------------------------------------------------") + se_post <- combat_by_cohort(se_combined) + post_suffix <- "_batchCorrected" + post_title <- paste0("Batch-corrected ", modality, " samples") + } else { + message("--------------------------------------------------") + message("Skipping ComBat per do_batch_correct=FALSE; using combined matrix as 'post'.") + se_post <- se_combined + post_suffix <- "_noBatchCorrect" + post_title <- paste0("Combined ", modality, " samples (no ComBat)") + } + + # Exports + message(" Building long tables") + long_pre <- se_to_long(se_combined, modality) |> + dplyr::filter(is.finite(correctedAbundance)) + long_post <- se_to_long(se_post, modality) + + if (write_outputs) { + path_pre <- file.path(out_dir, paste0(file_stem, "_preComBat_long.csv")) + path_post <- file.path(out_dir, paste0(file_stem, post_suffix, ".csv")) + write_csv(long_pre, path_pre) + write_csv(long_post, path_post) + upload_queue <- c(upload_queue, path_pre, path_post) + } + + # Post ComBat/QC + message(" Post-QC plots (PCA & histogram)") + pc_df <- pca_df_present_in_all(se_post) + gpca <- plot_pca(pc_df, post_title, pcols = pcols) + ghist <- plot_hist(se_post, paste0(modality, ": value distribution", ifelse(isTRUE(do_batch_correct), "", " (no ComBat)"))) + if (write_outputs) { + post_pca_pdf <- file.path(out_dir, paste0(file_stem, ifelse(isTRUE(do_batch_correct), "_PCA.pdf", "_PCA_noComBat.pdf"))) + post_hist_pdf <- file.path(out_dir, paste0(file_stem, ifelse(isTRUE(do_batch_correct), "_Hist.pdf", "_Hist_noComBat.pdf"))) + ggsave(post_pca_pdf, gpca, width = 7, height = 4.5, device = cairo_pdf) + ggsave(post_hist_pdf, ghist, width = 7, height = 4.5, device = cairo_pdf) + upload_queue <- c(upload_queue, post_pca_pdf, post_hist_pdf) + } + + # Pack results for return + results <- list( + se_batches = se_list, + se_combined = se_combined, + se_corrected = if (isTRUE(do_batch_correct)) se_post else NULL, + se_post = se_post, # 
always populated (corrected or not) + did_combat = isTRUE(do_batch_correct), + long_pre = long_pre, + long_post = long_post, + pca_df_pre = pre_pc_df, + pca_df_post = pc_df, + plots = list(pre_pca = pre_pca, pre_hist = pre_hist, pca = gpca, hist = ghist), + files = if (write_outputs) list(queued = upload_queue) else list() + ) + + # FINAL STEP: Uploads + if (write_outputs && !is.null(upload_parent_id)) { + perform_uploads(upload_queue, syn, upload_parent_id) + } else if (!write_outputs) { + message("write_outputs=FALSE — skipping all writes/uploads.") + } else { + message("No upload_parent_id provided — skipping uploads.") + } + + }, error = function(e) { + message("ERROR: run_modality() failed for ", modality, ". No uploads were attempted.") + message(" ", conditionMessage(e)) + message("------- DEBUG SNAPSHOT -------") + message(" traceback:"); print(sys.calls()) + message(" sessionInfo():"); print(utils::sessionInfo()) + message("------- END DEBUG -----------") + stop(e) + }) + + message("run_modality() finished successfully for: ", modality) + results +} diff --git a/source/02_analyze_modality_correlations.R b/source/02_analyze_modality_correlations.R new file mode 100644 index 0000000..b219816 --- /dev/null +++ b/source/02_analyze_modality_correlations.R @@ -0,0 +1,474 @@ +# --------------------------------------------------------------------------- +# 02_analyze_modality_correlations.R +# --------------------------------------------------------------------------- +# Purpose +# - Given (1) drug response fits and (2) a long-format omics table for one modality, +# this script builds sample x drug and sample x feature matrices, makes a few summary +# plots (drug efficacy/variability + optional heatmap), and computes Spearman +# correlations between drug response and molecular features. +# +# Main entry (This does both of the individual calls) +# - analyze_modality(fits, df_long, sample_col, feature_col, value_col, ...) +# +# Individual Calls +# - analyze_drug_response(fits, metric, outdir, heatmap_filename, ...) +# * Drug-only: builds drug_mat and writes 3 plots by default +# - most_efficacious.pdf +# - most_variable.pdf +# - drug heatmap (heatmap_filename) +# - analyze_modality_correlations(df_long, sample_col, feature_col, value_col, drug_mat, ...) 
+# * Modality-only: builds feat_mat and computes correlations + summary plot +# - cor_features_by_drug.pdf +# +# Inputs +# - fits: long drug response table with improve_sample_id, improve_drug_id, +# dose_response_metric, dose_response_value +# - df_long: long omics table with sample IDs + feature IDs + values +# - sample_col / feature_col / value_col: column names in df_long that identify +# the sample, the molecular feature, and the measurement to analyze +# +# Outputs (written to outdir) +# - most_efficacious.pdf, most_variable.pdf +# - drug_heatmap_large.pdf (optional; only for drugs measured in all samples) +# - cor_features_by_drug.pdf (counts of significant correlated features per drug) +# +# Returns (as a list) +# - drug_mat, feat_mat: wide matrices used for analysis +# - cor_tbl: per drug-feature correlations (Spearman) + p-values + FDR +# - cor_summary / cor_plot: summary of significant correlations +# - drug_summary: per-drug mean response, # measured, and variability +# --------------------------------------------------------------------------- + +suppressPackageStartupMessages({ + library(dplyr) + library(tidyr) + library(tibble) + library(ggplot2) + library(pheatmap) +}) + +dir.create("figs", showWarnings = FALSE) + + +# Helpers + +make_feature_matrix <- function(df_long, shared_ids, sample_col, feature_col, value_col) { + # Build a sample × feature matrix from a long-format omics table, restricted to a set + # of shared sample IDs. Cleans IDs, drops blank IDs, pivots to wide, fills missing with 0, + # and averages duplicates (mean) per sample-feature pair. + # Inputs: + # df_long: long omics data.frame + # shared_ids: character vector of sample IDs to keep + # sample_col: column name in df_long for sample ID + # feature_col: column name in df_long for feature ID + # value_col: column name in df_long for numeric value + # Output: + # data.frame (wide) with rownames = samples and columns = features + df <- df_long %>% + ungroup() %>% + dplyr::filter(.data[[sample_col]] %in% shared_ids) %>% + mutate( + !!sample_col := trimws(as.character(.data[[sample_col]])), + !!feature_col := trimws(as.character(.data[[feature_col]])) + ) + + # Drop rows with NA/blank sample/feature IDs + bad_sample <- is.na(df[[sample_col]]) | df[[sample_col]] == "" + bad_feature <- is.na(df[[feature_col]]) | df[[feature_col]] == "" + df <- df[!(bad_sample | bad_feature), , drop = FALSE] + + if (!nrow(df)) { + warning("[make_feature_matrix] No rows left after cleaning; returning empty frame.") + out <- data.frame(check.names = FALSE) + return(out) + } + + # Pivot to wide + wide <- df %>% + dplyr::select(all_of(c(sample_col, feature_col, value_col))) %>% + tidyr::pivot_wider( + names_from = all_of(feature_col), + values_from = all_of(value_col), + values_fill = 0, # Missing sample-feature pairs become zero for matrix. + values_fn = mean # Average duplicates per sample-feature after pivot. 
+ ) %>% + as.data.frame(check.names = FALSE) + + rn <- wide[[sample_col]] + bad_rn <- is.na(rn) | rn == "" + if (any(bad_rn)) { + message("[make_feature_matrix] Removing ", sum(bad_rn), " rows with NA/blank rownames after pivot.") + wide <- wide[!bad_rn, , drop = FALSE] + rn <- rn[!bad_rn] + } + + if (!nrow(wide)) { + warning("[make_feature_matrix] Wide table is empty after removing bad rownames; returning empty frame.") + out <- data.frame(check.names = FALSE) + return(out) + } + + # Finalize rownames + rownames(wide) <- make.unique(as.character(rn), sep = "_dup") + wide[[sample_col]] <- NULL + wide +} + +make_drug_matrix <- function( + # Build a sample × drug response matrix from the long drug fits table for a chosen metric + # (e.g., uM_viability). Pivots to wide and averages duplicates (mean). + # Inputs: + # fits: long drug response data.frame + # metric: metric value to select from metric_col (default "uM_viability") + # sample_col: sample ID column name in fits (default "improve_sample_id") + # drug_col: drug ID column name in fits (default "improve_drug_id") + # value_col: response value column name in fits (default "dose_response_value") + # metric_col: metric label column name in fits (default "dose_response_metric") + # Output: + # data.frame (wide) with rownames = samples and columns = drugs + fits, metric = "uM_viability", + sample_col = "improve_sample_id", + drug_col = "improve_drug_id", + value_col = "dose_response_value", + metric_col = "dose_response_metric" +) { + fits %>% + dplyr::filter(.data[[metric_col]] == metric) %>% + dplyr::select(all_of(c(sample_col, drug_col, value_col))) %>% + tidyr::pivot_wider( + names_from = all_of(drug_col), + values_from = all_of(value_col), + values_fn = mean # Average replicate drug responses per sample. + ) %>% + tibble::column_to_rownames(sample_col) +} + +summarize_drugs <- function( + # Summarize per-drug response for a selected metric and write two PDF scatter plots: + # (1) "most_efficacious" (low mean viability) and (2) "most_variable" (high SD). 
+ # Inputs: + # fits: long drug response data.frame + # metric: metric to analyze (default "uM_viability") + # metric_col: column holding metric labels (default "dose_response_metric") + # outdir: directory to write PDFs (default "figs") + # rotate_x: x-axis label rotation angle for readability + # Output: + # list with: + # summary: data.frame of per-drug meanResponse, nMeasured, variability + # p_eff: ggplot object for efficacious drugs + # p_var: ggplot object for variable drugs + fits, metric = "uM_viability", metric_col = "dose_response_metric", + outdir = "figs", rotate_x = 45 +) { + ds <- fits %>% + dplyr::filter(.data[[metric_col]] == metric) %>% + group_by(.data$improve_drug_id) %>% + distinct() %>% + summarize( + meanResponse = mean(.data$dose_response_value, na.rm = TRUE), + nMeasured = n_distinct(.data$improve_sample_id), + variability = sd(.data$dose_response_value, na.rm = TRUE), + .groups = "drop" + ) + + p_eff <- ds %>% + arrange(desc(.data$meanResponse)) %>% + dplyr::filter(.data$meanResponse < 0.5) %>% + ggplot(aes(y = .data$meanResponse, x = .data$improve_drug_id, + colour = .data$nMeasured, size = .data$variability)) + + geom_point() + + theme_minimal() + + theme(axis.text.x = element_text(angle = rotate_x, hjust = 1)) + + labs(title = "Most efficacious drugs", + y = "Mean cell viability (fraction)", x = "Drug") + + p_var <- ds %>% + arrange(desc(.data$variability)) %>% + dplyr::filter(.data$variability > 0.15) %>% + ggplot(aes(y = .data$meanResponse, x = .data$improve_drug_id, + colour = .data$nMeasured, size = .data$variability)) + + geom_point() + + theme_minimal() + + theme(axis.text.x = element_text(angle = rotate_x, hjust = 1)) + + labs(title = "Most variable drugs", + y = "Mean cell viability (fraction)", x = "Drug") + + ggsave(file.path(outdir, "most_efficacious.pdf"), p_eff, width = 12, height = 8, dpi = 300) + ggsave(file.path(outdir, "most_variable.pdf"), p_var, width = 12, height = 8, dpi = 300) + + list(summary = ds, p_eff = p_eff, p_var = p_var) +} + +compute_cors <- function(drug_mat, feat_mat, shared_samples = NULL) { + # Compute Spearman correlations between each drug response column and each feature column + # across shared samples. Also computes per-pair p-values (cor.test) when enough data exists + # and applies BH FDR correction. + # Inputs: + # drug_mat: numeric matrix/data.frame (samples × drugs), rownames = sample IDs + # feat_mat: numeric matrix/data.frame (samples × features), rownames = sample IDs + # shared_samples: optional character vector of sample IDs to use; if NULL, uses rowname intersection + # Output: + # tibble/data.frame with columns: drug, feature, cor, pval, fdr, direction + if (is.null(shared_samples)) { + shared_samples <- base::intersect(rownames(drug_mat), rownames(feat_mat)) + } + if (length(shared_samples) == 0L) { + return(tibble( + drug = character(), feature = character(), + cor = numeric(), pval = numeric(), fdr = numeric(), + direction = character() + )) + } + + drug_mat <- drug_mat[shared_samples, , drop = FALSE] + feat_mat <- feat_mat[shared_samples, , drop = FALSE] + + cres <- suppressWarnings( # Fast Spearman matrix using pairwise complete observations. 
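+    # stats::cor(drug_mat, feat_mat) returns a (drugs x features) correlation matrix,
+    # which is then pivoted below into one row per drug-feature pair.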
+ stats::cor(drug_mat, feat_mat, use = "pairwise.complete.obs", method = "spearman") + ) %>% + as.data.frame() %>% + tibble::rownames_to_column("drug") %>% + tidyr::pivot_longer(cols = - "drug", names_to = "feature", values_to = "cor") + + csig <- do.call(rbind, lapply(colnames(drug_mat), function(d) { + do.call(rbind, lapply(colnames(feat_mat), function(f) { + dv <- drug_mat[, d]; fv <- feat_mat[, f] + p <- NA_real_ + if (sum(is.finite(dv) & is.finite(fv)) >= 3) { # Require ≥3 finite pairs for cor.test p-value. + p <- tryCatch( + stats::cor.test(dv, fv, method = "spearman", use = "pairwise.complete.obs")$p.value, + error = function(e) NA_real_ + ) + } + c(drug = d, feature = f, pval = p) + })) %>% + as.data.frame() + })) %>% + as.data.frame() %>% + mutate(pval = as.numeric(.data$pval)) %>% + mutate(fdr = p.adjust(.data$pval, method = "BH")) + + left_join(cres, csig, by = c("drug","feature")) %>% + mutate(direction = ifelse(.data$cor < 0, "neg", "pos")) +} + +summarize_correlated_features <- function(cor_tbl, fdr_thresh = 0.25, outdir = "figs") { + # Summarize significant drug-feature associations by counting how many features are + # significantly correlated with each drug (split by positive/negative direction), + # and write a bar plot PDF. + # Inputs: + # cor_tbl: correlation table from compute_cors() + # fdr_thresh: significance threshold on FDR (default 0.25) + # outdir: directory to write the PDF (default "figs") + # Output: + # list with: + # summary: tibble of per-drug counts and mean correlation by direction + # plot: ggplot object (or NULL if no significant results) + if (nrow(cor_tbl) == 0L) return(list(summary = tibble(), plot = NULL)) + corsummary <- cor_tbl %>% + dplyr::filter(is.finite(.data$fdr), !is.na(.data$fdr), .data$fdr < fdr_thresh) %>% + mutate(direction = ifelse(.data$cor > 0, "pos", "neg")) %>% + group_by(.data$drug, .data$direction) %>% + summarize(features = n(), meanCor = mean(.data$cor), .groups = "drop") + + if (nrow(corsummary) == 0L) return(list(summary = corsummary, plot = NULL)) + + p <- corsummary %>% + dplyr::filter(.data$features > 1) %>% + ggplot(aes(x = .data$drug, y = .data$features, fill = .data$direction)) + + geom_col(position = "dodge") + + theme_minimal() + + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1)) + + labs(title = paste0("Significant feature counts per drug (FDR < ", fdr_thresh, ")"), + x = "Drug", y = "# Features") + + ggsave(file.path(outdir, "cor_features_by_drug.pdf"), p, width = 12, height = 6, units = "in") + list(summary = corsummary, plot = p) +} + + +# --------------------------- +# Drug-only analysis stage +# --------------------------- +analyze_drug_response <- function( + # Drug-only workflow: + # - builds sample x drug matrix for a metric + # - writes drug summary plots (most_efficacious, most_variable) + # - writes drug heatmap (by default) for drugs measured in all samples + # Inputs: + # fits: long drug response table + # metric: drug response metric to analyze (default "uM_viability") + # outdir: output directory for plots (default "figs") + # heatmap_filename: filename for drug heatmap PDF; set NULL to skip + # Output: + # list containing: + # drug_mat, drug_summary, p_eff, p_var + fits, + metric = "uM_viability", + outdir = "figs", + heatmap_filename = "drug_heatmap_large.pdf" +) { + + dir.create(outdir, showWarnings = FALSE, recursive = TRUE) + + # Drug matrix for the metric + drug_mat <- make_drug_matrix( + fits = fits, + metric = metric, + sample_col = "improve_sample_id", + drug_col = 
"improve_drug_id", + value_col = "dose_response_value", + metric_col = "dose_response_metric" + ) + + # Summaries (writes most_efficacious.pdf + most_variable.pdf) + dsum <- summarize_drugs( + fits, metric = metric, metric_col = "dose_response_metric", outdir = outdir + ) + + # Heatmap (by default) + if (!is.null(heatmap_filename) && nrow(drug_mat) > 0 && ncol(drug_mat) > 0) { + fulldrugs <- dsum$summary %>% # Heatmap for only drugs measured in all samples. + dplyr::filter(.data$nMeasured == nrow(drug_mat)) %>% + pull(.data$improve_drug_id) + + subm <- drug_mat[, colnames(drug_mat) %in% fulldrugs, drop = FALSE] + if (nrow(subm) > 1 && ncol(subm) > 0) { + pheatmap::pheatmap( + as.matrix(subm), + filename = file.path(outdir, heatmap_filename), + width = 28, height = 16, + angle_col = 45, fontsize_col = 6, + cluster_rows = TRUE, cluster_cols = TRUE, + show_rownames = TRUE, show_colnames = TRUE + ) + } + } + + list( + drug_mat = drug_mat, + drug_summary = dsum$summary, + p_eff = dsum$p_eff, + p_var = dsum$p_var + ) +} + + +# --------------------------- +# Modality-only analysis stage +# --------------------------- +analyze_modality_correlations <- function( + # Modality-only workflow: + # - aligns samples shared between drug_mat and omics table + # - builds sample x feature matrix + # - computes drug-feature Spearman correlations + p-values + FDR + # - summarizes significant features per drug and writes a summary plot + # Inputs: + # df_long: long omics table for one modality + # sample_col: sample ID column name in df_long (e.g., "Specimen") + # feature_col: feature ID column name in df_long (e.g., "feature_id" or "Gene") + # value_col: numeric value column name in df_long (e.g., "correctedAbundance") + # drug_mat: sample x drug matrix (rownames = sample IDs) + # outdir: output directory for plots (default "figs") + # fdr_thresh: FDR cutoff used for correlation summary (default 0.25) + # Output: + # list containing feat_mat, cor_tbl, cor_summary, cor_plot, shared_ids + df_long, + sample_col, + feature_col, + value_col, + drug_mat, + outdir = "figs", + fdr_thresh = 0.25 +) { + + dir.create(outdir, showWarnings = FALSE, recursive = TRUE) + + shared_ids <- base::intersect(rownames(drug_mat), unique(df_long[[sample_col]])) + + feat_mat <- make_feature_matrix( + df_long = df_long, + shared_ids = shared_ids, + sample_col = sample_col, + feature_col = feature_col, + value_col = value_col + ) + + # Correlations + shared_after <- base::intersect(rownames(drug_mat), rownames(feat_mat)) + cor_tbl <- if (length(shared_after) > 0L) { + compute_cors(drug_mat, feat_mat, shared_samples = shared_after) + } else { + tibble(drug = character(), feature = character(), cor = numeric(), + pval = numeric(), fdr = numeric(), direction = character()) + } + cor_res <- summarize_correlated_features(cor_tbl, fdr_thresh = fdr_thresh, outdir = outdir) + + list( + feat_mat = feat_mat, + shared_ids = shared_ids, + cor_tbl = cor_tbl, + cor_summary = cor_res$summary, + cor_plot = cor_res$plot + ) +} + + +# --------------------------- +# Main wrapper (backwards compatible) +# --------------------------- +analyze_modality <- function( + # End-to-end wrapper for running one omics modality: + # - runs drug-only analysis (drug_mat + drug plots + heatmap) + # - runs modality-only correlations (feat_mat + cor_tbl + summary plot) + # Inputs: + # fits: long drug response table (must include improve_sample_id, improve_drug_id, dose_response_metric, dose_response_value) + # df_long: long omics table for one modality + # sample_col: 
sample ID column name in df_long (e.g., "Specimen") + # feature_col: feature ID column name in df_long (e.g., "feature_id" or "Gene") + # value_col: numeric value column name in df_long (e.g., "correctedAbundance") + # metric: drug response metric to analyze (default "uM_viability") + # outdir: output directory for plots (default "figs") + # heatmap_filename: filename for drug heatmap PDF; set NULL to skip + # fdr_thresh: FDR cutoff used for correlation summary (default 0.25) + # Output: + # list containing matrices, correlation results, summaries, and (optionally) plot objects: + # drug_mat, feat_mat, shared_ids, cor_tbl, cor_summary, cor_plot, drug_summary + fits, + df_long, + sample_col, # e.g., "Specimen" + feature_col, # e.g., "feature_id" | "Gene" | "site" + value_col, # e.g., "correctedAbundance" + metric = "uM_viability", # Or fit_auc + outdir = "figs", + heatmap_filename = "drug_heatmap_large.pdf", + fdr_thresh = 0.25 +) { + + drug_res <- analyze_drug_response( + fits = fits, + metric = metric, + outdir = outdir, + heatmap_filename = heatmap_filename + ) + + mod_res <- analyze_modality_correlations( + df_long = df_long, + sample_col = sample_col, + feature_col = feature_col, + value_col = value_col, + drug_mat = drug_res$drug_mat, + outdir = outdir, + fdr_thresh = fdr_thresh + ) + + list( + drug_mat = drug_res$drug_mat, + feat_mat = mod_res$feat_mat, + shared_ids = mod_res$shared_ids, + cor_tbl = mod_res$cor_tbl, + cor_summary = mod_res$cor_summary, + cor_plot = mod_res$cor_plot, + drug_summary = drug_res$drug_summary + ) +} diff --git a/source/03_leapr_biomarker.R b/source/03_leapr_biomarker.R new file mode 100644 index 0000000..a456812 --- /dev/null +++ b/source/03_leapr_biomarker.R @@ -0,0 +1,793 @@ +# --------------------------------------------------------------------------- +# 03_leapr_biomarker.R +# --------------------------------------------------------------------------- +# Purpose +# - For each drug, correlate drug response (uM_viability) with omics features across +# samples, then use leapR to find enriched pathways/genesets among: +# * TOP = features positively correlated with viability (more resistant) +# * BOTTOM = features negatively correlated with viability (more sensitive) +# +# Main entry +# - run_leapr_directional_one_cached(drugs, df_long, sample_col, feature_col, value_col, omic_label, cache_path, ...) +# +# Inputs +# - drugs: long drug-response table with improve_drug_id, improve_sample_id, +# dose_response_metric, dose_response_value +# - df_long: long omics table (sample/feature/value columns) +# - sample_col / feature_col / value_col: column names in df_long that identify +# the sample ID, feature ID, and numeric measurement +# - omic_label: label used for reporting/assay naming (e.g., "global", "rna", "phospho") +# - cache_path: .RData file path used to save/load results (skips recompute unless always_rerun=TRUE) +# +# Options +# - geneset_name / geneset_object: choose the leapR geneset DB (defaults depend on omic_label) +# - min_features: minimum number of correlated features required to run leapR for TOP/BOTTOM +# - write_csvs: write per-drug leapR tables to CSV +# - test_one: run only the first drug - make sure things are actually working. 
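+#
+# Example call (a sketch; `drug_fits` and `phospho_long` are placeholder object names,
+# not objects defined in this repo):
+#   res_list <- run_leapr_directional_one_cached(
+#     drugs       = drug_fits,           # long drug-response table
+#     df_long     = phospho_long,        # long omics table for one modality
+#     sample_col  = "Specimen",
+#     feature_col = "site",
+#     value_col   = "correctedAbundance",
+#     omic_label  = "phospho",
+#     cache_path  = "leapr_phospho.RData",
+#     write_csvs  = TRUE)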
+# +# Outputs +# - Cached results saved to cache_path (if provided) +# - Optional CSVs: leapR_top_paths/dir_split/*_{TOP|BOTTOM}.csv (if write_csvs=TRUE) +# - Plots: save_leapr_plots() writes pathway barplots to figs/pathways_*.pdf +# +# Returns +# - Named list by drug: res_list[[drug]]$top and res_list[[drug]]$bottom (leapR result tables) +# --------------------------------------------------------------------------- + + +suppressPackageStartupMessages({ + library(dplyr) + library(tidyr) + library(readr) + library(stringr) + library(tibble) + library(SummarizedExperiment) + library(S4Vectors) + library(leapR) + library(ggplot2) + library(grDevices) +}) + +# ----------------------------- +# Helpers +# ----------------------------- +long_to_matrix <- function(df_long, sample_col, feature_col, value_col) { + # Convert a long-format omics table into a numeric matrix (rows = samples, cols = features). + # Cleans sample/feature IDs, drops NA/blank IDs, pivots to wide, fills missing with 0, + # and averages duplicates (mean) per sample-feature pair. + # Inputs: + # df_long: long omics data.frame + # sample_col: column name in df_long for sample IDs + # feature_col: column name in df_long for feature IDs + # value_col: column name in df_long for numeric values + # Output: + # numeric matrix with rownames = sample IDs and colnames = feature IDs, or NULL if empty + if (is.null(df_long) || !nrow(df_long)) return(NULL) + + df <- df_long |> + dplyr::mutate( + !!sample_col := trimws(as.character(.data[[sample_col]])), + !!feature_col := trimws(as.character(.data[[feature_col]])) + ) + + # Drop NA/blank sample or feature IDs with messages + bad_sample <- is.na(df[[sample_col]]) | df[[sample_col]] == "" + bad_feature <- is.na(df[[feature_col]]) | df[[feature_col]] == "" + n_bad_s <- sum(bad_sample, na.rm = TRUE) + n_bad_f <- sum(bad_feature, na.rm = TRUE) + if (n_bad_s > 0) message("[long_to_matrix] Dropping ", n_bad_s, " rows with NA/blank ", sample_col) + if (n_bad_f > 0) message("[long_to_matrix] Dropping ", n_bad_f, " rows with NA/blank ", feature_col) + df <- df[!(bad_sample | bad_feature), , drop = FALSE] + + if (!nrow(df)) { + warning("[long_to_matrix] No rows left after removing NA/blank sample/feature IDs.") + return(NULL) + } + + # Pivot to wide + wide <- df %>% + dplyr::select( + !!rlang::sym(sample_col), + !!rlang::sym(feature_col), + !!rlang::sym(value_col) + ) %>% + tidyr::pivot_wider( + names_from = !!rlang::sym(feature_col), + values_from = !!rlang::sym(value_col), + values_fill = 0, # Missing sample-feature pairs become zero after pivot. + values_fn = mean # Average duplicates per sample-feature before matrix conversion. + ) %>% + as.data.frame(check.names = FALSE) + + rn <- wide[[sample_col]] + bad_rn <- is.na(rn) | rn == "" + if (any(bad_rn)) { + message("[long_to_matrix] Removing ", sum(bad_rn), " rows with NA/blank rownames after pivot.") + wide <- wide[!bad_rn, , drop = FALSE] + rn <- rn[!bad_rn] + } + if (!nrow(wide)) { + warning("[long_to_matrix] Wide table is empty after cleaning.") + return(NULL) + } + + rownames(wide) <- make.unique(as.character(rn), sep = "_dup") + wide[[sample_col]] <- NULL + as.matrix(wide) +} + + + +# ---- PHOSPHO +.extract_gene_from_site <- function(site_id) { + # Extract a gene symbol from a phosphosite/site identifier string. Primarily uses the + # substring before the first '-' (e.g., "AAAS-S495s" -> "AAAS"); falls back to splitting + # on common delimiters if needed. 
+ # Inputs: + # site_id: character scalar (site/feature ID) + # Output: + # character scalar gene symbol (uppercase) or NA if not parseable + if (is.na(site_id) || site_id == "") return(NA_character_) + x <- as.character(site_id) + + # Get chars before first '-' (e.g., "AAAS-S495s" -> "AAAS") + gene <- sub("^([^\\-]+)-.*$", "\\1", x, perl = TRUE) + + # If no '-' present, fall back to splitting on common delimiters + if (identical(gene, x)) { + parts <- strsplit(x, "[|:_\\-\\.]", fixed = FALSE)[[1]] + gene <- parts[1] + } + gene <- sub("^([A-Za-z0-9]+).*", "\\1", gene) + gene <- toupper(gene) + if (nchar(gene) == 0) return(NA_character_) + gene +} + +# Build phospho site gene map from long table +.build_phospho_gene_map_from_long <- function(df_long, feature_col) { + # Build a mapping from phosphosite IDs to gene symbols using columns present in the + # long table (preferred). If no suitable gene column exists, falls back to parsing gene + # symbols from the site IDs themselves. + # Inputs: + # df_long: long omics data.frame + # feature_col: column name in df_long containing phosphosite IDs + # Output: + # named character vector mapping site -> gene, or NULL if no sites found + gene_cols <- c("Gene","gene","hgnc_id","hgnc_symbol","protein","Protein","Symbol","symbol") + has <- gene_cols[gene_cols %in% colnames(df_long)] + if (length(has)) { + gcol <- has[[1]] + mp <- df_long %>% + dplyr::select(!!rlang::sym(feature_col), !!rlang::sym(gcol)) %>% + dplyr::rename(site = !!rlang::sym(feature_col), gene = !!rlang::sym(gcol)) %>% + dplyr::mutate(site = trimws(as.character(site)), + gene = toupper(trimws(as.character(gene)))) %>% + dplyr::filter(!is.na(site), site != "", !is.na(gene), gene != "") %>% + dplyr::distinct(site, gene) + if (nrow(mp)) return(setNames(mp$gene, mp$site)) + } + sites <- unique(trimws(as.character(df_long[[feature_col]]))) + sites <- sites[!is.na(sites) & sites != ""] + if (!length(sites)) return(NULL) + genes <- vapply(sites, .extract_gene_from_site, FUN.VALUE = character(1)) + genes[genes == ""] <- NA_character_ + setNames(genes, sites) +} + + +.collapse_sites_to_genes <- function(cor_named_vec, map_site2gene, agg = c("mean","maxabs")) { + # Collapse a named vector of site-level correlation values to gene-level values using a + # site->gene mapping. Supports aggregation by mean or by the max absolute correlation per gene. + # Inputs: + # cor_named_vec: named numeric vector (names = site IDs, values = correlations) + # map_site2gene: named character vector mapping site -> gene + # agg: "mean" or "maxabs" (how to aggregate multiple sites per gene) + # Output: + # named numeric vector (names = genes, values = aggregated correlations) + agg <- match.arg(agg) + if (is.null(map_site2gene) || !length(cor_named_vec)) return(cor_named_vec) + + # Align and drop unmapped + genes <- map_site2gene[names(cor_named_vec)] # Map each site ID to its gene symbol. 
+ keep <- !is.na(genes) & genes != "" + v <- cor_named_vec[keep] + g <- genes[keep] + if (!length(v)) return(setNames(numeric(0), character(0))) + + if (agg == "mean") { + # mean per gene + df <- tibble(gene = g, val = as.numeric(v)) %>% + group_by(gene) %>% summarise(val = mean(val, na.rm = TRUE), .groups = "drop") + out <- stats::setNames(df$val, df$gene) + } else { + # max by absolute value, keep sign + df <- tibble(gene = g, val = as.numeric(v)) %>% + mutate(ord = order(-abs(val))) %>% + group_by(gene) %>% + slice_max(order_by = abs(val), n = 1, with_ties = FALSE) %>% + ungroup() + out <- stats::setNames(df$val, df$gene) + } + out +} + +# ---- Normalize phosphosite IDs to match kinasesubstrates (e.g. "AAAS-S495s" -> "AAAS-S495") +.normalize_kinase_site_id <- function(x) { + # Normalize phosphosite IDs to better match leapR kinasesubstrates formatting by trimming + # trailing lowercase letters (e.g., "AAAS-S495s" -> "AAAS-S495"). + # Inputs: + # x: character vector of phosphosite IDs + # Output: + # character vector of normalized site IDs + x <- as.character(x) + x <- trimws(x) + # Drop trailing lowercase letters + sub("[a-z]+$", "", x) +} + +# Spearman correlations +.col_spearman <- function(vec, mat) { + # Compute Spearman correlation between a drug response vector and each feature column in a + # sample × feature matrix. Uses sample ID intersection and pairwise complete observations. + # Inputs: + # vec: named numeric vector of responses (names = sample IDs) + # mat: numeric matrix (rownames = sample IDs, cols = features) + # Output: + # named numeric vector of correlations (one per feature column; NA where not computable) + shared <- intersect(names(vec), rownames(mat)) + if (length(shared) < 3) return(setNames(rep(NA_real_, ncol(mat)), colnames(mat))) + v <- vec[shared] + m <- as.matrix(mat[shared, , drop = FALSE]) + apply(m, 2, function(col) { + if (all(is.na(col))) return(NA_real_) + if (sd(col, na.rm = TRUE) == 0 || sd(v, na.rm = TRUE) == 0) return(NA_real_) + suppressWarnings(cor(v, col, method = "spearman", use = "pairwise.complete.obs")) + }) +} + +# Build SummarizedExperiment to feed into leapR +.build_se_from_corvec <- function(cor_named_vec, features_all, col_label, + map_to_gene = NULL, assay_label = "proteomics") { + # Build a single-column SummarizedExperiment containing correlation scores for a set of features, + # suitable as input to leapR enrichment functions. Optionally stores a mapped gene ID in rowData. 
+ # Inputs: + # cor_named_vec: named numeric vector of scores (names = feature IDs) + # features_all: character vector of features to include (sets row order and rownames) + # col_label: column/sample label to assign in the SE (e.g., "_TOP") + # map_to_gene: optional named vector mapping feature -> gene ID/symbol (stored as hgnc_id) + # assay_label: assay name label to assign (e.g., "proteomics", "phospho", "rna") + # Output: + # SummarizedExperiment with 1 assay column holding the scores + v <- rep(NA_real_, length(features_all)); names(v) <- features_all + common <- intersect(names(cor_named_vec), features_all) + v[common] <- cor_named_vec[common] + mat <- matrix(v, nrow = length(v), ncol = 1, dimnames = list(features_all, col_label)) + rd <- S4Vectors::DataFrame(feature_id = features_all) + rd$hgnc_id <- if (is.null(map_to_gene)) features_all else map_to_gene[features_all] + se <- SummarizedExperiment::SummarizedExperiment( + assays = list(values = mat), + rowData = rd, + colData = S4Vectors::DataFrame(sample = col_label) + ) + SummarizedExperiment::assayNames(se) <- assay_label + se +} + +.safe_leapr <- function(...) { + # Run leapR::leapR() safely: catches errors, prints a readable message, and returns NULL + # instead of stopping the whole pipeline. This is used for Debugging. + # Inputs: + # ...: arguments passed directly to leapR::leapR() + # Output: + # leapR result object/table, or NULL on error + tryCatch(leapR::leapR(...), + error = function(e) { message("[leapR] ", conditionMessage(e)); NULL }) +} + +# Load a leapR built-in geneset by name +.load_leapr_geneset_by_name <- function(name) { + # Load a built-in leapR geneset dataset by name (e.g., "kinasesubstrates", "krbpaths"). + # Validates the name and errors if the dataset cannot be loaded. + # Inputs: + # name: character scalar geneset name + # Output: + # geneset object loaded from the leapR package + valid <- c("kinasesubstrates", "ncipid", "krbpaths", "longlist", "shortlist") + if (!(name %in% valid)) { + stop("Unknown geneset name: '", name, "'. Valid: ", paste(valid, collapse = ", ")) + } + suppressWarnings(utils::data(list = name, package = "leapR", envir = environment())) + if (!exists(name, inherits = FALSE)) { + stop("leapR dataset '", name, "' not found in the installed {leapR}.") + } + get(name, inherits = FALSE) +} + +# Decide default geneset from omic label when no override is provided +.default_geneset_for_omic <- function(omic_label) { + # Choose a default geneset database based on the omics label: + # - phospho-like labels -> kinasesubstrates + # - otherwise -> krbpaths + # Inputs: + # omic_label: character scalar describing the modality (e.g., "phospho", "rna", "global") + # Output: + # geneset object to use with leapR + ol <- tolower(omic_label) + if (ol %in% c("phospho","phosphoproteomics","phosphoprotein","phosphoproteome")) { + .load_leapr_geneset_by_name("kinasesubstrates") + } else { + .load_leapr_geneset_by_name("krbpaths") + } +} + +# ----------------------------- +# Main +# ----------------------------- +run_leapr_directional_one_cached <- function( + + # Future Consideration: + # Consider using synapse to store results + # + # For each drug, correlate uM_viability with each omics feature across samples, split features + # into TOP (positive; more resistant) and BOTTOM (negative; more sensitive), then run leapR + # enrichment separately on each direction. Supports phospho-specific site->gene handling, + # optional site-normalization for kinasesubstrates, CSV writing, and caching to .RData. 
+ # Inputs: + # drugs: long drug-response data.frame (must include improve_drug_id, improve_sample_id, dose_response_metric, dose_response_value) + # df_long: long omics data.frame (sample/feature/value columns) + # sample_col: column name in df_long for sample IDs + # feature_col: column name in df_long for feature IDs (gene/site) + # value_col: column name in df_long for numeric measurement + # omic_label: modality label used in assay naming and output filenames (e.g., "rna", "global", "phospho") + # cache_path: file path to save/load cached results (.RData); skipped if always_rerun=TRUE + # write_csvs: TRUE/FALSE; write per-drug TOP/BOTTOM leapR tables to CSV + # always_rerun: TRUE/FALSE; ignore cache and recompute + # min_features: minimum features required to run leapR for TOP/BOTTOM + # test_one: TRUE/FALSE; only run the first drug (debug) + # geneset_name: optional built-in leapR geneset name + # geneset_object: optional geneset object to use directly (overrides geneset_name/default) + # Output: + # named list by drug: res_list[[drug]]$top and res_list[[drug]]$bottom (leapR results or NULL) + drugs, # Character vector of drugs to test (IDs/names used by your fits/model) + df_long, # Long-format omics table (one row per sample x feature) + sample_col, # Column name in df_long containing sample IDs + feature_col, # Column name in df_long containing feature IDs (e.g., gene/site) + value_col, # Column name in df_long containing numeric values to analyze + omic_label, # Short label for this modality (used in logs/output names), e.g. "RNA" + cache_path, # File path to cache (read/write) computed results + write_csvs = FALSE,# If TRUE, write result/intermediate CSVs to disk + always_rerun = FALSE,# If TRUE, ignore cache and recompute even if cache exists + min_features = 5, # Minimum # of features required to run; otherwise skip/return early + test_one = FALSE,# If TRUE, run a single test case (e.g., first drug) for debugging + geneset_name = NULL, # Optional geneset label (used for naming outputs/plot titles) + geneset_object = NULL # Optional geneset definition (e.g., character vector) to filter features + ) { + + # cache check! If the cached value exists, stop there. + if (!always_rerun && is.character(cache_path) && nzchar(cache_path) && file.exists(cache_path)) { + load(cache_path) # Skip recompute by loading cached res_list from disk. 
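+    # The cache file is expected to contain an object named `res_list` (written by
+    # save() at the end of this function); if it does not, execution falls through
+    # and the results are recomputed.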
+ if (exists("res_list")) return(res_list) + } + + # pivot long to matrix + feat_mat <- long_to_matrix(df_long, sample_col, feature_col, value_col) + if (is.null(feat_mat) || !nrow(feat_mat) || !ncol(feat_mat)) { + warning("[run_leapr_directional_one_cached] Empty feature matrix after pivot; returning empty list.") + return(list()) + } + + # pick geneset + if (!is.null(geneset_object)) { + geneset_db <- geneset_object + } else if (!is.null(geneset_name)) { + geneset_db <- .load_leapr_geneset_by_name(geneset_name) + } else { + geneset_db <- .default_geneset_for_omic(omic_label) + } + + # optional phospho site gene mapping + map_site2gene <- NULL + is_phospho <- tolower(omic_label) %in% c("phospho","phosphoproteomics","phosphoprotein","phosphoproteome") + if (is_phospho) { + map_site2gene <- .build_phospho_gene_map_from_long(df_long, feature_col) + if (is.null(map_site2gene) || !length(map_site2gene)) { + phos_features <- colnames(feat_mat) + map_site2gene <- setNames( + vapply(phos_features, .extract_gene_from_site, FUN.VALUE = character(1)), + phos_features + ) + } + # Print statements + feats <- colnames(feat_mat) + mapped <- map_site2gene[feats] + n_mapped <- sum(!is.na(mapped) & mapped != "") + message(sprintf("[phospho mapping] %d/%d sites mapped to gene symbols (%.1f%%)", + n_mapped, length(feats), 100 * n_mapped / max(1, length(feats)))) + if (n_mapped < length(feats)) { + unm <- feats[is.na(mapped) | mapped == ""] + if (length(unm)) { + show_n <- min(5L, length(unm)) + message("[phospho mapping] Unmapped examples: ", + paste(utils::head(unm, show_n), collapse = ", "), + if (length(unm) > show_n) paste0(" ... +", length(unm) - show_n, " more") else "") + } + } + } + + # For phospho, detect when we are using site-level kinase substrates + uses_kinase_sites <- is_phospho && { + if (!is.null(geneset_name)) { + identical(geneset_name, "kinasesubstrates") + } else { + identical(geneset_db, .load_leapr_geneset_by_name("kinasesubstrates")) + } + } + + # If using kinasesubstrates, normalize column names so they match site IDs + if (uses_kinase_sites) { + old_sites <- colnames(feat_mat) + norm_sites <- .normalize_kinase_site_id(old_sites) + if (!identical(old_sites, norm_sites)) { + message("[kinasesubstrates] Normalizing phosphosite IDs (e.g. 
'AAAS-S495s' -> 'AAAS-S495')") + colnames(feat_mat) <- make.unique(norm_sites) + } + } + + res_list <- list() + out_csv_dir <- file.path("leapR_top_paths", "dir_split") + if (write_csvs && !dir.exists(out_csv_dir)) dir.create(out_csv_dir, recursive = TRUE) + + all_drugs <- unique(drugs$improve_drug_id) + if (test_one && length(all_drugs) > 0) { + message("[run_leapr_directional_one_cached] test_one=TRUE then running only the first drug: ", all_drugs[[1]]) + all_drugs <- all_drugs[[1]] + } else { + all_drugs <- sort(all_drugs) + } + + total <- length(all_drugs) + for (i in seq_along(all_drugs)) { + drug <- all_drugs[[i]] + message(sprintf("[%-3d/%-3d] %s", i, total, drug)) + + # mean response per sample for uM_viability + dv <- drugs %>% + dplyr::filter(.data$improve_drug_id == !!drug, + .data$dose_response_metric == "uM_viability") %>% + dplyr::group_by(.data$improve_sample_id) %>% + dplyr::summarise(resp = mean(.data$dose_response_value, na.rm = TRUE), + .groups = "drop") + + if (!nrow(dv)) { + message(" No response rows for metric 'uM_viability'; skipping.") + next + } + dv_vec <- stats::setNames(dv$resp, dv$improve_sample_id) + + # correlations at site-level (or normalized site-level for kinasesubstrates) + cors <- .col_spearman(dv_vec, feat_mat) + pos <- cors[!is.na(cors) & cors > 0] # resistant (TOP) + neg <- cors[!is.na(cors) & cors < 0] # sensitive (BOTTOM; flip) + message(sprintf(" Features (site-level): pos=%d, neg=%d (min_features=%d)", + length(pos), length(neg), min_features)) + + if (uses_kinase_sites && i == 1) { + ks_sites <- unique(unlist(geneset_db[["matrix"]])) + ov <- intersect(names(cors), ks_sites) + message("[kinasesubstrates] Overlapping sites with geneset (first drug): ", length(ov)) + if (length(ov)) { + message(" Example overlaps: ", paste(utils::head(ov, 5), collapse = ", ")) + } + } + + # For phospho + GENE-LEVEL sets, collapse site to gene before SE + if (is_phospho && !uses_kinase_sites) { + pos <- .collapse_sites_to_genes(pos, map_site2gene, agg = "mean") + neg <- .collapse_sites_to_genes(neg, map_site2gene, agg = "mean") + message(sprintf(" Gene-level: pos=%d, neg=%d", length(pos), length(neg))) + } + + res_list[[drug]] <- list(top = NULL, bottom = NULL) + + # TOP (resistant) + if (length(pos) >= min_features) { + feats_top <- names(pos) + se_top <- .build_se_from_corvec( + cor_named_vec = pos, + features_all = feats_top, + col_label = paste0(drug, "_TOP"), + map_to_gene = if (is_phospho) NULL else NULL, # not needed when features are genes/sites + assay_label = omic_label + ) + top_res <- .safe_leapr( + geneset = geneset_db, + enrichment_method = "enrichment_in_order", + eset = se_top, + assay_name = omic_label, + primary_columns = paste0(drug, "_TOP"), + id_column = NULL + ) + res_list[[drug]]$top <- top_res + message(" TOP (resistant): ", if (is.null(top_res)) "no result" else "OK") + if (write_csvs && !is.null(top_res)) { + utils::write.csv(as.data.frame(top_res), + file = file.path(out_csv_dir, paste0(drug, "_", omic_label, "_TOP.csv")), + row.names = FALSE) + } + } else { + message(" TOP (resistant): skipped (too few positive features)") + } + + # BOTTOM (sensitive) + if (length(neg) >= min_features) { + # Flip sign so strongest negatives rank highest. 
+ neg_flip <- -neg + feats_bot <- names(neg_flip) + se_bot <- .build_se_from_corvec( + cor_named_vec = neg_flip, + features_all = feats_bot, + col_label = paste0(drug, "_BOTTOM"), + map_to_gene = if (is_phospho) NULL else NULL, + assay_label = omic_label + ) + bot_res <- .safe_leapr( + geneset = geneset_db, + enrichment_method = "enrichment_in_order", + eset = se_bot, + assay_name = omic_label, + primary_columns = paste0(drug, "_BOTTOM"), + id_column = NULL + ) + res_list[[drug]]$bottom <- bot_res + message(" BOTTOM(sensitive): ", if (is.null(bot_res)) "no result" else "OK") + if (write_csvs && !is.null(bot_res)) { + utils::write.csv(as.data.frame(bot_res), + file = file.path(out_csv_dir, paste0(drug, "_", omic_label, "_BOTTOM.csv")), + row.names = FALSE) + } + } else { + message(" BOTTOM(sensitive): skipped (too few negative features)") + } + + if (isTRUE(test_one)) break + } + + if (is.character(cache_path) && nzchar(cache_path)) { + save(res_list, file = cache_path) + } + res_list +} + +# ----------------------------- +# Plot and save using leapR builtin plotter +# ----------------------------- +save_leapr_plots <- function( + # Save leapR pathway barplots for TOP (resistant) and BOTTOM (sensitive) results for each drug. + # Supports plotting all drugs or a requested subset (case-insensitive matching). + # Inputs: + # res_list: named list returned by run_leapr_directional_one_cached() + # omic_label: modality label used in plot titles and filenames + # top_n: number of top pathways to plot per direction + # drugs: NULL to plot all; otherwise character vector of drug IDs/names to plot (case-insensitive) + # outdir: output directory for PDF files + # Output: + # invisible(NULL); side-effect is writing PDF plots to outdir + res_list, + omic_label, + top_n = 15, + drugs = NULL, # NULL = plot all drugs in res_list; otherwise character vector of drug IDs/names (case-insensitive) + outdir = "figs" # output directory for PDFs +) { + if (!length(res_list)) return(invisible(NULL)) + dir.create(outdir, showWarnings = FALSE, recursive = TRUE) + + safelabel <- function(x) gsub("[^A-Za-z0-9_.-]", "_", x) + + all_drugs <- names(res_list) + if (!length(all_drugs)) { + message("[save_leapr_plots] res_list has no named drug entries.") + return(invisible(NULL)) + } + + # Case-insensitive lookup table: UPPER(drug) -> original name in res_list + key_upper <- toupper(all_drugs) + drug_map <- stats::setNames(all_drugs, key_upper) + + # Decide which drugs to plot + if (is.null(drugs)) { + plot_drugs <- all_drugs + } else { + req <- as.character(drugs) + req_upper <- toupper(req) + + found_upper <- intersect(req_upper, names(drug_map)) + plot_drugs <- unname(drug_map[found_upper]) + + missing_upper <- setdiff(req_upper, names(drug_map)) + if (length(missing_upper)) { + missing_original <- req[req_upper %in% missing_upper] + message("[save_leapr_plots] Skipping ", length(missing_original), + " requested drug(s) not present in res_list (case-insensitive match): ", + paste(utils::head(missing_original, 10), collapse = ", "), + if (length(missing_original) > 10) paste0(" ... 
+", length(missing_original) - 10, " more") else "") + } + } + + if (!length(plot_drugs)) { + message("[save_leapr_plots] No matching drugs to plot.") + return(invisible(NULL)) + } + + for (drug in plot_drugs) { + two <- res_list[[drug]] + if (is.null(two)) next + + # TOP (resistant) + if (!is.null(two$top)) { + p_top <- leapR::plot_leapr_bar( + two$top, + title = paste0(drug, " — ", omic_label, " (Resistant)"), + top_n = top_n + ) + if (!is.null(p_top)) { + fn <- file.path(outdir, paste0( + "pathways_", safelabel(drug), "_", omic_label, "_resistant_top", top_n, ".pdf" + )) + ggplot2::ggsave(fn, p_top, width = 7, height = 5, device = grDevices::cairo_pdf) + } + } + + # BOTTOM (Sensitive) + if (!is.null(two$bottom)) { + p_bot <- leapR::plot_leapr_bar( + two$bottom, + title = paste0(drug, " — ", omic_label, " (Sensitive)"), + top_n = top_n + ) + if (!is.null(p_bot)) { + fn <- file.path(outdir, paste0( + "pathways_", safelabel(drug), "_", omic_label, "_sensitive_top", top_n, ".pdf" + )) + ggplot2::ggsave(fn, p_bot, width = 7, height = 5, device = grDevices::cairo_pdf) + } + } + } + + invisible(NULL) +} + + + +#Extra plotting functions +count_sig_pathways <- function(res_list, omic_label, alpha = 0.05, pcol = "SignedBH_pvalue") { + if (is.null(res_list) || !length(res_list)) return(tibble()) + + drugs <- names(res_list) + rows <- lapply(drugs, function(drug) { + two <- res_list[[drug]] + if (is.null(two)) return(NULL) + + one_dir <- function(tbl, direction_label) { + if (is.null(tbl) || !nrow(tbl)) { + return(tibble( + omic_label = omic_label, drug = drug, direction = direction_label, + n_sig = 0L, n_total = 0L + )) + } + + df <- as.data.frame(tbl) + + # No fallback: require SignedBH_pvalue + if (!(pcol %in% colnames(df))) { + stop(sprintf( + "[count_sig_pathways] Column '%s' not found for drug='%s', omic='%s', direction='%s'. Columns: %s", + pcol, drug, omic_label, direction_label, paste(colnames(df), collapse = ", ") + )) + } + + pv <- df[[pcol]] + + # Robust whether SignedBH_pvalue is signed or not + tibble( + omic_label = omic_label, + drug = drug, + direction = direction_label, + n_sig = sum(!is.na(pv) & abs(pv) < alpha), + n_total = nrow(df) + ) + } + + bind_rows( + one_dir(two$top, "Resistant (TOP)"), + one_dir(two$bottom, "Sensitive (BOTTOM)") + ) + }) + + bind_rows(rows) +} + + + + + +plot_sig_pathways_one_omic_paged <- function(res_list, omic_label, alpha = 0.05, page_size = 80) { + # Plot (paged) bar charts of the number of significant enriched pathways per drug for a + # single omics modality. Uses a fixed y-axis across all pages, orders drugs globally by + # total significant pathways (desc), and splits drugs into pages to keep plots readable. + # Each page is printed to the current graphics device. + # Inputs: + # res_list: named list of per-drug enrichment results (typically output from leapR runs); + # names(res_list) should be drug names/IDs, and each entry should contain TOP/BOTTOM + # enrichment tables used by count_sig_pathways() + # omic_label: character scalar label for the modality (e.g., "rna", "global", "phospho") + # alpha: numeric significance threshold applied to pcol (default 0.05) + # page_size: integer number of drugs to include per page (default 80) + # Output: + # invisibly returns NULL; produces ggplot2 pages via print(p). If no significant pathways + # are found, prints a message and returns invisibly. 
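+  # Example (a sketch; assumes `res_list` came from run_leapr_directional_one_cached()):
+  #   plot_sig_pathways_one_omic_paged(res_list, omic_label = "global", alpha = 0.05, page_size = 60)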
+  df <- count_sig_pathways(res_list, omic_label, alpha = alpha, pcol = "SignedBH_pvalue")
+  if (!nrow(df)) return(invisible(NULL))
+
+  # total sig per drug and drop zeros
+  totals <- df %>%
+    group_by(drug) %>%
+    summarise(sig_total = sum(n_sig, na.rm = TRUE), .groups = "drop") %>%
+    filter(sig_total > 0) %>%
+    arrange(desc(sig_total), drug)
+
+  if (!nrow(totals)) {
+    message("[plot_sig_pathways_one_omic_paged] No drugs with significant pathways for omic='", omic_label, "'.")
+    return(invisible(NULL))
+  }
+
+  # Global ordering + paging
+  totals <- totals %>%
+    mutate(global_rank = row_number(),
+           page = ceiling(global_rank / page_size))
+
+  df2 <- df %>%
+    inner_join(totals, by = "drug")
+
+  # Fix y-axis across ALL pages for this omic
+  ymax <- max(df2$n_sig, na.rm = TRUE)
+  if (!is.finite(ymax) || ymax < 1) ymax <- 1
+
+  # Fixed drug order across all pages
+  ordered_drugs <- totals$drug
+  n_pages <- max(totals$page, na.rm = TRUE)
+
+  for (pg in seq_len(n_pages)) {
+    page_drugs <- totals %>% filter(page == pg) %>% pull(drug)
+
+    dpg <- df2 %>%
+      filter(page == pg) %>%
+      mutate(drug = factor(drug, levels = page_drugs))  # keep global order within page
+
+    p <- ggplot(dpg, aes(x = drug, y = n_sig, fill = direction)) +
+      geom_col(position = position_dodge(width = 0.85)) +
+      scale_y_continuous(
+        limits = c(0, ymax),
+        breaks = scales::pretty_breaks(n = 6)
+      ) +
+      labs(
+        title = paste0(
+          toupper(omic_label),
+          ": Significant enriched pathways per drug (|SignedBH_pvalue| < ", alpha, ")",
+          " - page ", pg, " of ", n_pages, " (", length(page_drugs), " drugs)"
+        ),
+        x = NULL,
+        y = "Number of significant pathways",
+        fill = NULL
+      ) +
+      theme_bw() +
+      theme(
+        axis.text.x = element_text(angle = 60, hjust = 1, vjust = 1),
+        legend.position = "right"
+      )
+
+    print(p)
+  }
+
+  invisible(NULL)  # This prevents NULL from appearing in the knitted results
+}
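+
+# ---------------------------------------------------------------------------
+# Usage sketch
+# ---------------------------------------------------------------------------
+# A minimal, non-executed sketch of how the functions defined in
+# source/02_analyze_modality_correlations.R and this file are intended to fit
+# together. `fits` and `global_long` are placeholder objects (the drug-response
+# fits and a long-format omics table loaded elsewhere); the column names follow
+# the examples used in the documentation above.
+if (FALSE) {
+  source("source/02_analyze_modality_correlations.R")
+  source("source/03_leapr_biomarker.R")
+
+  # Drug response vs. one omics modality: matrices, summary plots, correlations.
+  res <- analyze_modality(
+    fits        = fits,
+    df_long     = global_long,
+    sample_col  = "Specimen",
+    feature_col = "Gene",
+    value_col   = "correctedAbundance",
+    metric      = "uM_viability",
+    outdir      = "figs"
+  )
+
+  # Directional pathway enrichment per drug, then plotting of the cached results.
+  res_list <- run_leapr_directional_one_cached(
+    drugs = fits, df_long = global_long,
+    sample_col = "Specimen", feature_col = "Gene", value_col = "correctedAbundance",
+    omic_label = "global", cache_path = "leapr_global.RData"
+  )
+  save_leapr_plots(res_list, omic_label = "global", top_n = 15)
+  plot_sig_pathways_one_omic_paged(res_list, omic_label = "global")
+}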