COUNT.TEST.R-69- x = x[ idx ]
COUNT.TEST.R-70-
COUNT.TEST.R:71: if (!is.null( CountPerBp ) && is( x@rowRanges, "GRanges" ) )
COUNT.TEST.R-72- {
COUNT.TEST.R-73- # For each group the count per hundred bp must be equal or greater
--
COUNT.TEST.R-119- x = x[ !is.na( test$log2FC ) ]
COUNT.TEST.R-120-
COUNT.TEST.R:121: if ( FilterLog2FC && is.null( pvalCutOff ) && !saveAll ) {
COUNT.TEST.R-122- x = x[ abs( x@elementMetadata$log2FC ) > Minlog2FC ]
COUNT.TEST.R-123- }
COUNT.TEST.R:124: if ( !is.null( pvalCutOff ) && !saveAll )
COUNT.TEST.R-125- {
COUNT.TEST.R-126- if ( FilterLog2FC )
--
COUNT.TEST.R-131- x = x[ x@elementMetadata$adj.pval < pvalCutOff, ]
COUNT.TEST.R-132- } else {
COUNT.TEST.R:133: if (!is.null( pvalCutOff ) && saveAll) {
COUNT.TEST.R-134- if (FilterLog2FC ) idx = which( abs( x@elementMetadata$log2FC ) > Minlog2FC)
COUNT.TEST.R-135- x@elementMetadata$adj.pval <- rep( 1, nrow( x ) )
--
divHellinger.R-24-{
divHellinger.R-25- hdiv = 0
divHellinger.R:26: if (!is.na(sum( p )) && (sum( p ) > 0)) {
divHellinger.R-27- if (is.null(n)) {
divHellinger.R-28- hdiv = 2*((sqrt(p[1]) - sqrt(p[2]))^2 + (sqrt(1 - p[1]) - sqrt(1 - p[2]))^2)
--
filterByCoverage.R-21- verbose = TRUE) {
filterByCoverage.R-22- cn = names(col.names)
filterByCoverage.R:23: if ( (!is.element("mC", cn) || !is.element("uC", cn)) && !is.element("coverage", cn) )
filterByCoverage.R-24- stop( "* Provide the number of the 'coverage' column or the numbers for columns 'mC' & 'uC'")
filterByCoverage.R-25- if (!inherits(x, "list")) {
--
infDivergence.R-44- percentile = 0.999, num.cores = 1L, tasks = 0L, meth.level = FALSE, verbose = TRUE )
infDivergence.R-45-{
infDivergence.R:46: if (is.null(columns) && (!meth.level)) columns = 1:2
infDivergence.R:47: if (meth.level && (is.null(columns))) columns = 1
infDivergence.R-48- sn = names(indiv)
infDivergence.R-49- if (meth.level) {
--
readCounts2GRangesList.R-67- colnames(x) <- cn
readCounts2GRangesList.R-68- if (!is.element("end", cn)) x$end = x$start
readCounts2GRangesList.R:69: if (is.element("coverage", cn) && is.element("mC", cn)) x$uC = x$coverage - x$mC
readCounts2GRangesList.R:70: if (is.element("fraction", cn) && is.element("coverage", cn)) {
readCounts2GRangesList.R-71- x$mC = x$fraction * x$coverage
readCounts2GRangesList.R-72- }
readCounts2GRangesList.R:73: if (is.element("percent", cn) && is.element("coverage", cn)) {
readCounts2GRangesList.R-74- x$mC = x$coverage * x$percent/100
readCounts2GRangesList.R-75- x$uC = x$coverage - x$mC
--
Weibull3Ps.R-53-
Weibull3Ps.R-54- # To reduce the number of points to used in the fit
Weibull3Ps.R:55: if (!missing( npoints ) && npoints < N) {
Weibull3Ps.R:56: if (!missing( npoints0 ) && npoints0 < N) {
Weibull3Ps.R-57- F0 = ECDF( X, npoints = npoints0 )
Weibull3Ps.R-58- X0 = knots( F0 )
--
Weibull3Ps.R-81- silent = TRUE ) )
Weibull3Ps.R-82-
Weibull3Ps.R:83: if (inherits( FIT1, "try-error" ) && !missing( npoints0 ))
Weibull3Ps.R-84- {
Weibull3Ps.R-85- FIT0 = try( nlsLM( formula, data = data.frame( X = X0, Y = pX0 ),
--
Weibull3Ps.R-137- }
Weibull3Ps.R-138-
Weibull3Ps.R:139: if (inherits( FIT2, "try-error" ) && !missing( npoints0 )) {
Weibull3Ps.R-140- cat("*** Trying with less data \n" )
Weibull3Ps.R-141- FIT0 = try( nlsLM( formula, data = data.frame( X = X0, Y = pX0 ),
--
Weibull3Ps.R-154- }
Weibull3Ps.R-155-
Weibull3Ps.R:156: if (inherits( FIT2, "try-error" ) && !missing( npoints0 )) {
Weibull3Ps.R-157- if (!inherits( FIT0, "try-error" )) {
Weibull3Ps.R-158- FIT2 = FIT0
--
Weibull3Ps.R-175- } else PASS2 = FALSE
Weibull3Ps.R-176-
Weibull3Ps.R:177: if (PASS1 && PASS2) {
Weibull3Ps.R-178- # Save results
Weibull3Ps.R-179- sink( file = logfile)
--
Weibull3Ps.R-186- if (AIC(FIT1) <= AIC(FIT2) ) FIT = FIT1 else FIT = FIT2
Weibull3Ps.R-187- }
Weibull3Ps.R:188: if ( PASS1 && !PASS2 ) {
Weibull3Ps.R-189- FIT = FIT1
Weibull3Ps.R-190- # Save results
--
Weibull3Ps.R-264-
Weibull3Ps.R-265-
Weibull3Ps.R:266: if (inherits( FIT1, "try-error" ) && inherits( FIT2, "try-error" ))
Weibull3Ps.R-267- R.cross.FIT = 0 else {
Weibull3Ps.R-268- p.FIT1 = getPreds( coef( FIT1 ), X[ cros.ind.2 ] ) # prediction using model 1