Hi Pjs,

extending Detlew’s post --> 18592. Try this code:

library(Power2Stage)

# Compare the three power-calculation methods ("shifted", "nct", "exact")
# of power.2stage() for Potvin method C, CV 0.40, n1 = 12.
# For each pmethod two simulation runs are done:
#   x: theta0 = 0.95 -> p(BE) is the overall power
#   y: theta0 = 1.25 -> p(BE) is the empiric type I error (TIE)
CV      <- 0.4
n1      <- 12
method  <- "C"
pmethod <- c("shifted", "nct", "exact")

st  <- proc.time()[[3]]
res <- data.frame(method=method, CV=CV, n1=n1, pmethod=pmethod,
                  power=NA, TIE=NA, n.tot=NA, pct05=NA, pct50=NA,
                  pct95=NA, pct.stg2=NA)
for (j in seq_along(pmethod)) {
  # power: simulate at the assumed true ratio (= GMR)
  x <- power.2stage(alpha=rep(0.0294, 2), method=method, CV=CV, n1=n1,
                    GMR=0.95, theta0=0.95, pmethod=pmethod[j],
                    npct=c(0.05, 0.5, 0.95))
  # TIE: simulate at the upper BE acceptance limit
  y <- power.2stage(alpha=rep(0.0294, 2), method=method, CV=CV, n1=n1,
                    GMR=0.95, theta0=1.25, pmethod=pmethod[j])
  res[j, "power"] <- round(x$pBE, 4)
  res[j, "TIE"]   <- round(y$pBE, 4)
  res[j, "n.tot"] <- round(x$nmean, 1)
  # address the percentile columns by name, not by position
  # (was res[j, 8:10], which silently breaks if columns are reordered)
  res[j, c("pct05", "pct50", "pct95")] <- x$nperc
  res[j, "pct.stg2"] <- round(x$pct_s2, 1)
}
et <- proc.time()[[3]] - st
cat("run time:", signif(et/60, 1), "minutes\n"); print(res, row.names=FALSE)

`run time: 8.9 minutes`

method CV n1 pmethod power TIE n.tot pct05 pct50 pct95 pct.stg2

C 0.4 12 shifted 0.7509 0.0342 79.0 34 74 142 99.0

C 0.4 12 nct 0.7506 0.0345 78.9 32 74 142 98.9

C 0.4 12 exact 0.7506 0.0345 78.9 32 74 142 98.9

`power` = overall power; `TIE` = empiric Type I Error (for true ratio 1.25); `n.tot` = expected average total sample size; `pct05`, `pct50`, `pct95` = 5%, 50%, 95% percentiles of the expected total sample size; `pct.stg2` = percent of studies expected to proceed to the 2nd stage. Reported by Potvin:

` method CV n1 pmethod power TIE n.tot pct05 pct50 pct95 pct.stg2`

C 0.4 12 shifted 0.7505 0.0346 79.0 32 74 142 99.0

Try this code with a

# Repeat the simulations 25 times with setseed=FALSE (i.e. a random seed
# in every run) to show the simulation error of power and TIE estimates.
runs <- 25
res  <- data.frame(power=rep(NA, runs), TIE=rep(NA, runs))

st <- proc.time()[[3]]
# seq_len() instead of 1:runs (safe if runs were ever 0)
for (j in seq_len(runs)) {
  # power at the assumed true ratio
  res[j, "power"] <- power.2stage(alpha=rep(0.0294, 2), method=method,
                                  CV=CV, n1=n1, GMR=0.95, theta0=0.95,
                                  pmethod="shifted", setseed=FALSE)$pBE
  # empiric type I error at the upper BE limit
  res[j, "TIE"]   <- power.2stage(alpha=rep(0.0294, 2), method=method,
                                  CV=CV, n1=n1, GMR=0.95, theta0=1.25,
                                  pmethod="shifted", setseed=FALSE)$pBE
}
et <- proc.time()[[3]] - st
cat("run time:", signif(et/60, 2), "minutes\n"); summary(res)

`run time: 6.4 minutes`

power TIE

Min. :0.7482 Min. :0.03403

1st Qu.:0.7503 1st Qu.:0.03443

Median :0.7511 Median :0.03455

Mean :0.7511 Mean :0.03451

3rd Qu.:0.7522 3rd Qu.:0.03460

Max. :0.7537 Max. :0.03485

If you want to estimate the sample size of the 2nd stage, use

`sampleN2.TOST()`

of `Power2Stage`

instead of `sampleN.TOST()`

of `PowerTOST`

and subtract n1:

`x <- sampleN2.TOST(alpha=0.0294, CV=CV, n1=n1, theta0=0.95)`

# Fixed-design sample size for comparison (uses the df of a 1-stage design)
y <- sampleN.TOST(alpha=0.0294, CV=CV, theta0=0.95,
                  details=FALSE, print=FALSE)
# n2 directly with correct df
print(x, row.names=FALSE)
# total n with incorrect df
print(y, row.names=FALSE)

` Design alpha CV theta0 theta1 theta2 n1 Sample size Achieved power Target power`

2x2 0.0294 0.4 0.95 0.8 1.25 12 68 0.8107469 0.8

Design alpha CV theta0 theta1 theta2 Sample size Achieved power Target power

2x2 0.0294 0.4 0.95 0.8 1.25 80 0.8105103 0.8

Let’s see whether we can reproduce Potvin’s Example 2:

library(PowerTOST)
library(Power2Stage)

# Reproduce Potvin et al., Example 2 (method C, n1 = 12).
pass.1 <- pass.2 <- FALSE
alpha  <- 0.0294        # adjusted alpha of Potvin's method C
method <- "C"           # kept for reference
n1     <- 12

# --- stage 1 (interim) data from the paper ---
log.GMR1 <- 0.08396     # point estimate (log scale)
S.sq1    <- 0.032634    # residual mean squares error
df1      <- n1 - 2

# Method C first evaluates interim power at alpha 0.05
pwr.1 <- power.TOST(alpha=0.05, CV=mse2CV(S.sq1), n=n1,
                    method="shifted")

# If interim power >= 0.80 BE is assessed with alpha 0.05 (90% CI),
# otherwise with the adjusted alpha (94.12% CI)
if (pwr.1 >= 0.80) {
  CI.stg1 <- round(100*CI.BE(alpha=0.05, pe=exp(log.GMR1),
                             CV=mse2CV(S.sq1), n=n1), 2)
} else {
  CI.stg1 <- round(100*CI.BE(alpha=alpha, pe=exp(log.GMR1),
                             CV=mse2CV(S.sq1), n=n1), 2)
}

# BUGFIX: CI.stg1 is in percent -> compare against 80 / 125,
# not 0.80 / 1.25 (the original always branched into stage 2)
if (CI.stg1[["lower"]] < 80 | CI.stg1[["upper"]] > 125) {
  # failed in stage 1: estimate the stage 2 sample size
  # (sampleN2.TOST uses the correct df of the TSD via n1)
  n2 <- sampleN2.TOST(alpha=alpha, CV=mse2CV(S.sq1), n1=n1,
                      theta0=0.95, method="shifted")[["Sample size"]]
} else {
  pass.1 <- TRUE
}

if (!pass.1) {
  # --- stage 2 (pooled) data from the paper ---
  log.GMR2 <- 0.014439
  S.sq2    <- 0.045896
  df2      <- n1 + n2 - 3   # pooled df with a stage term
  CI.stg2  <- round(100*exp(log.GMR2 + c(-1, +1)*
                    qt(1-alpha, df2)*sqrt(2*S.sq2/(n1+n2))), 2)
  names(CI.stg2) <- c("lower", "upper")
  # BUGFIX: BE requires BOTH limits within 80...125% -> '&' (not '|'),
  # and percent scale (not 0.80 / 1.25)
  if (CI.stg2[["lower"]] >= 80 & CI.stg2[["upper"]] <= 125) pass.2 <- TRUE
}

# --- assemble the report ---
txt <- paste0("Power in the interim = ", round(100*pwr.1, 1), "%")
if (pwr.1 >= 0.80) {
  txt <- paste0(txt, "\nSince power is >= 0.80, BE assessed with alpha 0.05")
} else {
  txt <- paste0(txt, "\nSince power is < 0.80, BE assessed with alpha ", alpha)
}
if (pass.1) {
  txt <- paste0(txt, "\nStudy passed in the 1st stage: ")
  if (pwr.1 >= 0.80) {
    txt <- paste0(txt, "90% CI: ")
  } else {
    txt <- paste0(txt, 100*(1-2*alpha), "% CI = ")
  }
  txt <- paste0(txt, CI.stg1[["lower"]], "% - ", CI.stg1[["upper"]], "%")
} else {
  txt <- paste0(txt, "\nStudy failed in the 1st stage: ")
  if (pwr.1 >= 0.80) {
    txt <- paste0(txt, "90% CI = ")
  } else {
    txt <- paste0(txt, 100*(1-2*alpha), "% CI = ")
  }
  txt <- paste0(txt, CI.stg1[["lower"]], "% - ", CI.stg1[["upper"]], "%",
                "\n2nd stage with ", n2, " subjects initiated.")
  # BUGFIX: report the 2nd stage only if one was actually performed;
  # the original referenced CI.stg2 unconditionally and errored
  # whenever the study passed in stage 1
  if (pass.2) {
    txt <- paste0(txt, "\nStudy passed in the 2nd stage: ")
  } else {
    txt <- paste0(txt, "\nStudy failed in the 2nd stage: ")
  }
  txt <- paste0(txt, 100*(1-2*alpha), "% CI = ",
                CI.stg2[["lower"]], "% - ", CI.stg2[["upper"]], "%")
}
cat(txt, "\n")

`Power in the interim = 64.9%`

Since power is < 0.80, BE assessed with alpha 0.0294

Study failed in the 1st stage: 94.12% CI = 92.93% - 127.28%

2nd stage with 8 subjects initiated.

Study passed in the 2nd stage: 94.12% CI = 88.45% - 116.38%

Dear Pjs,

This is a fundamental misunderstanding.

What you try to do is, if I understand you correctly, to obtain the power of a two stage design.

This is not possible with `PowerTOST`. The framework of TSDs is so complicated that no algebraic solution for obtaining power is available. What you have to do is to use simulations, like the ones described in the Potvin paper.

Have a look into the R add-on package

`Power2Stage`

. With the function `power.2stage()`

you are able to verify the type 1 error and the power reported in the Potvin paper. Example:

`library(Power2Stage)`

# power at true ratio=0.95
# theta0 = simulated true ratio; GMR = ratio assumed in the interim
# sample-size step (per the output below: 1e+05 sims, p(BE) = 'power')
power.2stage(alpha=c(0.0294, 0.0294), method="C", CV=0.4, n1=12, GMR=0.95, theta0=0.95)

gives

`TSD with 2x2 crossover `

Method C: alpha0 = 0.05, alpha (s1/s2) = 0.0294 0.0294

Target power in power monitoring and sample size est. = 0.8

Power calculation via non-central t approx.

CV1 and GMR = 0.95 in sample size est. used

No futility criterion

BE acceptance range = 0.8 ... 1.25

CV = 0.4; n(stage 1) = 12; GMR= 0.95

1e+05 sims at theta0 = 0.95 (p(BE) = 'power').

p(BE) = 0.75058

p(BE) s1 = 0.00986

Studies in stage 2 = 98.95%

Distribution of n(total)

- mean (range) = 78.9 (12 ... 302)

- percentiles

5% 50% 95%

32 74 142

`# empirical type 1 error`

# simulate at the upper BE limit (theta0 = 1.25); per the output below
# 1e+06 sims are used and p(BE) is the empiric type I error ('alpha')
power.2stage(alpha=c(0.0294, 0.0294), method="C", CV=0.4, n1=12, GMR=0.95, theta0=1.25)

gives

`TSD with 2x2 crossover `

Method C: alpha0 = 0.05, alpha (s1/s2) = 0.0294 0.0294

Target power in power monitoring and sample size est. = 0.8

Power calculation via non-central t approx.

CV1 and GMR = 0.95 in sample size est. used

No futility criterion

BE acceptance range = 0.8 ... 1.25

CV = 0.4; n(stage 1) = 12; GMR= 0.95

1e+06 sims at theta0 = 1.25 (p(BE) = TIE 'alpha').

p(BE) = 0.034468

p(BE) s1 = 0.003881

Studies in stage 2 = 99.5%

Distribution of n(total)

- mean (range) = 79.1 (12 ... 360)

- percentiles

5% 50% 95%

32 74 142

BTW: If you aim to work exactly like Potvin et al. you have to set the argument

`pmethod="shifted"`

in the function calls. This calculates power in the power monitoring step of the two stage schemes via a somewhat crude approximation and was used in the Potvin paper for speed reasons only. Nevertheless you will not obtain exactly the same numbers, due to the unknown auxiliary conditions of the simulations, e.g. the seed of the random number generator or its type.

BTW2: The negative power from FARTSSIE is of course nonsense and results from an approximation for the power calculation. The exact power for a fixed (1-stage) design with your settings is

`library(PowerTOST)`

# exact power of the corresponding fixed (1-stage) design with CV 0.40
# and n = 12 (result quoted below: 0.02843316)
power.TOST(CV=0.4, n=12)

[1] 0.02843316

]]>Note that my previous reply is for night dosing(10pm). We were dosing chrono-technology drug which Tmax at 6 hours and designed to be given at night. You can't ask the subject to restrict his posture throughout the night.

Yes and no. If you go by Tmax alone then it's kind of a flaw logic. An example drug is COVERA HS which as a Tmax of 11 hours. Are you going to ask the subjects to follow a 11 hour postural restriction?

J]]>

OK, Thank you very much......

Edit: Full quote removed. Please delete everything from the text of the original poster which is not necessary in understanding your answer; see also this post --> 550! [Ohlbe]]]>

Hi All,

Below is the power reported for the assumed parameters with different software.

Can you please advise which method for power estimation is being used in FARTSSIE and R?

Currently I am cross-verifying the power value with the published paper of Potvin for two-stage designs. They calculated power with a modification of Hauschke et al. For example, if I take N=12, CV=40% and ratio 95%, the power by method C is reported to be 0.7505. For the same parameters the power by FARTSSIE is -24.19%.

Regards

Pjs]]>

Dear Lakshmi.

Just a short comment on the whole procedure.

To me that is the whole point: getting the tablet out of the stomach in a standardized way. And by the way, well,there is not much to float on in a fasted stomach.

I do not believe that the absorption/PK of the drug compound is of relevance. See, under fasted conditions gastric emptying of a dosage form is somewhat variable. I have seen tablets passing through the gaster within seconds as well as taking it easy and remaining for hours in the fundus.

Most people live well by assuming that on average there will be a cleansing movement forcing the tablet from the stomach within not more than 90 minutes. This is roughly 2 hours (we are pharmacists not mathematicians ;-)), double to be on the safe side.

So to me its physiology we need to handle and not pharmacokinetics here.

Best regards,

Relaxation.]]>

Dear Sir,

thank your for your reply.

As per your above mentioned post, the statement state that, 02 or 01 hour postural restriction useful for floating the tablet. But considering the PK (absorption) point of view, for example drug cmax is 4 hours. if we kept 02 hours restriction. after 02 hours if subject will move that will impact on the absorption phase of drug. so better we need keep 04 hours and followed up to Tmax.

Now, my question is: if Tmax is 6 hours, we can't tell the subject to follow a 6-hour postural restriction. But considering the absorption phase it would be required. In these cases, what do we need to do?

Edit: Full quote removed. Please delete everything from the text of the original poster which is not necessary in understanding your answer; see also this post #5 --> 16205! [Helmut]]]>

My RLD is from US so I'm referring FDA approved PI.

yes sure.

There is need to refer correct source for warning and precautions, adverse reactions, interactions etc.

Agreed. If I'm submitting the case study in specific regulatory other than CDSCO, then I've to refer official approved source wrt RLD.

Edit: Full quote removed. Please delete everything from the text of the original poster which is not necessary in understanding your answer; see also this post #5 --> 16205! [Helmut]]]>

Hi ElMaestro,

Can’t remember but sounds familiar.

Simply great!

It’s a TSD (I’ll refer to your famous paper), parallel, n

Correct me if I am wrong but the whole point of 4 hr restriction is to ensure that the pill/tablet has passed the stomach and into the intestines(Even with floating tablets i think 4hrs is more than enough).

For night (meaning 10pm) dosing we usually do 2 hr restriction follow by lying down on the right side, or a 1 hr restriction follow by lying down on the right side.

J]]>

I use the product monograph from the manufacturer (FDA studies). Drugs@FDA is my alternate source.

J]]>

Hi Hötzi,

:-D:-D:-D I still recall your response to one of the previous executables I shared. :-D:-D:-D Forgot which one (Was it the Deep Sink project or something) - apart from disassembly it I think it was subjected to all sorts of challenges and weird settings and extremities.

Being away from the office without Windows... Oh, the horror! I can't imagine what that would be like...

Good luck at the agency. I imagine you are well prepared and it will be a success.]]>

Hi ElMaestro,

Why should I? Anyway, I have no win-OS with me (sitting in a Hotel on the other side of el Jeffe’s office waiting to be grilled tomorrow).

Strange. I made a cosmetical change to your post.]]>

Hi Nobody and everybody else,

to illustrate it please have a look at this archive. Unzip to its own folder, then drag and drop the csv file onto the executable. You may need to give green light if your security package protests or something. There is no virus in it (at least there wasn't when the programmer uploaded it), and it does not phone home.

It demonstrates what the programmer is doing with a bootstrap solution for dissolution data. It provides 1.000.000 bootstraps of two datasets and illustrates the results in the html file.

The hot stuff, whose detail I will not discuss here (that's for now solely a matter between me and someone in a public institution who likes to look at dissolution and to reject dossiers in murderous ways :-D ... I hope said person isn't reading this post), is the automatic generation of a series of graphs and plots and with an objective function, gamma. The user looks at these graphs, considers the gamma, and takes a decision. The user could in principle have a look at 1.000.000 such graphs and gammas (how about doing that in Prism, Nobody?).

The selection of curves to present is done in a pseudo-clever (actually pretty retarded, presently) fashion.

I hope this illustrates why a manual solution isn't viable (if not, then trust me, a manual solution is not viable. Not at all.)

For those who are into bootstraps, the result is at the end as a table, including raw bootstrap confidence intervals, Bias-corrected bootstrap interval, and bias-corrected and accelerated interval.

Hötzi: If you are reading this then I know you will absolutely torture my poor innocent little executable which is guilty of no fault :-D:-D:-D:-D:-D:-D. Yes, you can make it crash. Yes, the programmer (some wacko from Denmark) did not implement error trapping for whatever garbage the users fill in. No, this is not for the masses. Yes, the programmer has restricted the intended use to this dataset only.

By the way: The upload link function didn't work 2 minutes ago (it added bebac.at in the target string), so I pasted the raw link. Please forgive me.]]>

...but how many days have you already wasted for this millisecond code to do what you want it to do? Have you succeeded yet? :-D Just saying...

Prism you learn in 30 minutes and if you have an appropriate template your done in a few minutes to fine-tune your output.

If you have a hammer the world is full of nails. Unlucky if you have a sledgehammer and your nail is just 10 mm long.... ;-)]]>

Dear Friends,

I am trying to use DDSolver for dissolution profile comparison by model dependent method but could not enter more than 4 parameters in any of the listed models. I am having 6 parameters.

Can any body help me? Please share some link to its tutorial.

Thanks]]>

Dear Siddheshwar,

Oh, there are actually much more than 2, if you also look outside of the USA...

Well, use for what purpose ?

When you prepare the information form for the subjects: I would suggest you to also look at the information approved in India, not just in the USA, Europe or wherever... Use the most complete list to inform the subjects. You may actually combine information from several official sources to prepare your information form and make it as complete and comprehensive as possible. Keep all documents used as source in the TMF.

To determine whether SAEs are expected or not (and therefore could be a SUSAR requiring expedited reporting), if you are the sponsor or if the sponsor delegated this task to you: only approved information should be used, not information submitted but not yet approved. But if you run your trial in India, wouldn't the CDSCO expect you to use the information they have approved, rather than the FDA's ?]]>

Hi Seppe,

cannot get exactly the same (assuming normal distribution)

confint.cv <- function(x, alpha = 0.05, method = "modmckay") {
  # Confidence interval for the coefficient of variation of a normal sample.
  #
  # Author: Kevin Wright
  # Reference: Vangel, Mark. "Confidence Intervals for a Normal Coefficient
  #   of Variation." The American Statistician 50(1), p. 21-26.
  #   (The original comment cited "Vol 15, No1" -- likely a typo.)
  #
  # Args:
  #   x:      numeric vector; NAs are dropped before the calculation
  #   alpha:  significance level; a (1 - alpha) two-sided CI is returned
  #   method: "modmckay" (default), "mckay" or "naive"; any unrecognized
  #           value silently falls back to "modmckay" (kept for backward
  #           compatibility with the original implementation)
  #
  # Returns: a one-row data.frame with columns method, lower, upper,
  #   CV (the point estimate sd/mean) and alpha.
  #
  # Example:
  #   confint.cv(c(326, 302, 307, 299, 329), .05, "modmckay")
  x     <- na.omit(x)
  v     <- length(x) - 1            # degrees of freedom
  mu    <- mean(x)
  sigma <- sd(x)                    # idiomatic for sqrt(var(x))
  k     <- sigma / mu               # point estimate of the CV

  # The approximations degrade for large CVs, so warn the user
  if (k > 0.33) warning("Confidence interval may be very approximate.")

  method <- casefold(method)        # in case we see "McKay"
  if (method == "mckay") {
    # McKay method; Vangel eq. 15
    t1 <- qchisq(1 - alpha/2, v)/v
    t2 <- qchisq(alpha/2, v)/v
    u1 <- v*t1
    u2 <- v*t2
    lower <- k/sqrt((u1/(v + 1) - 1)*k*k + u1/v)
    upper <- k/sqrt((u2/(v + 1) - 1)*k*k + u2/v)
  } else if (method == "naive") {
    # Naive method; Vangel eq. 17
    lower <- k/sqrt(qchisq(1 - alpha/2, v)/v)
    upper <- k/sqrt(qchisq(alpha/2, v)/v)
  } else {
    # Modified McKay method; Vangel eq. 16 (default / fallback)
    method <- "modmckay"            # was method="modmckay" ('=' assignment)
    u1 <- qchisq(1 - alpha/2, v)
    u2 <- qchisq(alpha/2, v)
    lower <- k/sqrt(((u1 + 2)/(v + 1) - 1)*k*k + u1/v)
    upper <- k/sqrt(((u2 + 2)/(v + 1) - 1)*k*k + u2/v)
  }

  cbind.data.frame(method = method, lower = lower, upper = upper,
                   CV = k, alpha = alpha)
}

# Example data (n = 10): compare the three interval methods side by side.
# Uses '<-' for assignment (the original used 'x=c(...)').
x <- c(3.8, 3.9, 3.9, 3.8, 4.1, 4.0, 3.4, 3.6, 3.8, 3.7)
results <- rbind(confint.cv(x),
                 confint.cv(x, method = "mckay"),
                 confint.cv(x, method = "naive"))
print(results)

gives

` method lower upper CV alpha`

1 modmckay 0.03617573 0.09632067 0.05263158 0.05

2 mckay 0.03618047 0.09641016 0.05263158 0.05

3 naive 0.03620185 0.09608475 0.05263158 0.05

]]>Two different sources available for Prescribing Information (PI).

1. Drugs@FDA: FDA Approved Drug Products

2. DAILYMED (U.S. National Library of Medicine)

Drugs@FDA has last approved PI and posted by the Food and Drug Administration (FDA) itself.

The drug labeling information on DAILYMED is the most recent PI (different than FDA approved labeling) submitted (by firms) to FDA but labeling changes have not been approved by FDA.

Among these sources, which source of Prescribing Information being used during conduct of clinical study considering the safety and other concerns.]]>

Hi

I have a problem with calculating the 95%CI of the CV

I have an example but i don't now how they get the result of the 95%CI.

Example:

1. 3,8

2. 3,9

3. 3,9

4. 3,8

5. 4,1

6. 4,0

7. 3,4

8. 3,6

9. 3,8

10.3,7

Mean= 3,8

SD= 0,2

CV= 5,3

95%CI (3,6;9,7)

How do you get the interval of 3,6;9,7 from the CV?

The only thing I know that it's not a normal distribution

Greetz]]>