R package for the course Métodos Estatísticos (Statistical Methods) at the Federal University of Bahia.
Confidence intervals are computed using the methods presented by
Montgomery and Runger (2010). All implemented methods are slight
modifications of methods already implemented in stats. The
user can compute bilateral (two-sided) and unilateral (one-sided) confidence intervals.
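Since the implementations stay close to stats, a unilateral interval can be sketched with base R's t.test() (a minimal illustration, not a statBasics call; see the package help pages for its own one-sided options):
x <- rnorm(100, mean = 10, sd = 2)
# one-sided 95% interval for the mean: only an upper bound is reported
t.test(x, alternative = "less", conf.level = 0.95)$conf.int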
In this package, there are three approaches for computing a confidence interval for a population proportion.
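For reference, the textbook large-sample interval for a proportion is p_hat ± z_{alpha/2} * sqrt(p_hat * (1 - p_hat) / n); a minimal base-R sketch (ci_1pop_bern may use this or a related construction, see its help page):
n <- 1000
x <- rbinom(n, 1, prob = 0.5)            # simulated 0/1 sample
p_hat <- mean(x)                         # sample proportion
z <- qnorm(1 - 0.01 / 2)                 # z quantile for a 99% interval
p_hat + c(-1, 1) * z * sqrt(p_hat * (1 - p_hat) / n)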
First case: the number of successes in n (scalar value) trials.
library(tidyverse)
library(statBasics)
size <- 1000
sample <- rbinom(size, 1, prob = 0.5)
n_success <- sum(sample)
ci_1pop_bern(n_success, size, conf_level = 0.99)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 0.475 0.557 0.99

Second case: vectors of the numbers of successes and the numbers of trials.
library(tidyverse)
library(statBasics)
n <- c(30, 20, 10)
x <- n |> map_int(~ sum(rbinom(1, size = .x, prob = 0.75)))
ci_1pop_bern(x, n, conf_level = 0.99)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 0.500 0.833 0.99

Third case: a vector of 0s (failures) and 1s (successes).
library(tidyverse)
library(statBasics)
x <- rbinom(50, size = 1, prob = 0.75)
ci_1pop_bern(x, conf_level = 0.99)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 0.658 1 0.99

We illustrate how to compute a confidence interval for the mean of a normal distribution in two cases: 1) the standard deviation is known; 2) the standard deviation is unknown. A confidence interval for the variance is shown as well.
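For reference, these are the textbook intervals xbar ± z_{alpha/2} * sigma / sqrt(n) (known sigma) and xbar ± t_{n-1, alpha/2} * s / sqrt(n) (unknown sigma); a minimal base-R sketch at conf_level = 0.91:
x <- rnorm(100, mean = 10, sd = 2)
n <- length(x)
# known sigma = 2: normal quantile
mean(x) + c(-1, 1) * qnorm(1 - 0.09 / 2) * 2 / sqrt(n)
# unknown sigma: t quantile and sample standard deviation
mean(x) + c(-1, 1) * qt(1 - 0.09 / 2, df = n - 1) * sd(x) / sqrt(n)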
library(tidyverse)
library(statBasics)
media_pop <- 10
sd_pop <- 2
x <- rnorm(100, mean = media_pop, sd = sd_pop)
ci_1pop_norm(x, sd_pop = sd_pop, conf_level = 0.91)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 9.62 10.3 0.91

When the population standard deviation is unknown, simply omit sd_pop:
library(tidyverse)
library(statBasics)
media_pop <- 10
sd_pop <- 2
x <- rnorm(100, mean = media_pop)
ci_1pop_norm(x, conf_level = 0.91)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 9.73 10.1 0.91

A confidence interval for the variance of a normal population is obtained by setting parameter = 'variance':
library(tidyverse)
library(statBasics)
media_pop <- 10
sd_pop <- 2
x <- rnorm(100, mean = media_pop, sd = sd_pop)
ci_1pop_norm(x, parameter = 'variance', conf_level = 0.91)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 3.92 6.37 0.91

The function ci_1pop_exp computes a confidence interval for the mean of an exponential population (the default conf_level is 0.95):
library(tidyverse)
library(statBasics)
media_pop <- 800
taxa_pop <- 1 / media_pop
x <- rexp(100, rate = taxa_pop)
ci_1pop_exp(x)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 739. 1095. 0.95

In the general case, a confidence interval based on the t-Student distribution is still suitable, even if the distribution is not normal, as illustrated in the example below.
library(tidyverse)
library(statBasics)
media_pop <- 50
x <- rpois(100, lambda = media_pop)
ci_1pop_general(x)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 47.8 50.7 0.95

Next, we will illustrate how to use this package to test a
statistical hypothesis about a single population parameter. All methods
are already implemented in R. This package provides slight
modifications for teaching purposes.
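For comparison, the unknown-variance case of ht_1pop_mean below corresponds to the classical one-sample t-test from stats (a base-R illustration, not a statBasics call):
x <- rnorm(100, mean = 10, sd = 2)
# classical one-sample t-test: H0: mu == 5 versus H1: mu != 5
t.test(x, mu = 5, alternative = "two.sided")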
In the examples below, mean_null is the mean in the null hypothesis H0:

- alternative == "two.sided": H0: mu == mean_null and H1: mu != mean_null. Default value.
- alternative == "less": H0: mu >= mean_null and H1: mu < mean_null.
- alternative == "greater": H0: mu <= mean_null and H1: mu > mean_null.

When the population standard deviation is known (passed via sd_pop), a z-test is performed:
library(tidyverse)
library(statBasics)
mean_null <- 5
sd_pop <- 2
x <- rnorm(100, mean = 10, sd = sd_pop)
ht_1pop_mean(x, mu = mean_null, conf_level = 0.95, sd_pop = sd_pop, alternative = "two.sided")
#> # A tibble: 1 × 10
#> statistic p_value critical_value critical_region alternative mu sig_level
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl> <dbl>
#> 1 24.4 0 1.96 (-Inf,-1.960)U(1… two.sided 5 0.05
#> # … with 3 more variables: lower_ci <dbl>, upper_ci <dbl>, conf_level <dbl>

When the population standard deviation is unknown, sd_pop is omitted and a t-test is performed:
library(tidyverse)
library(statBasics)
mean_null <- 5
sd_pop <- 2
x <- rnorm(100, mean = 10, sd = sd_pop)
ht_1pop_mean(x, mu = mean_null, conf_level = 0.95, alternative = "two.sided")
#> # A tibble: 1 × 10
#> statistic p_value critical_value critical_region alternative mu sig_level
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl> <dbl>
#> 1 24.5 0 1.98 (-Inf,-1.984)U(1… two.sided 5 0.05
#> # … with 3 more variables: lower_ci <dbl>, upper_ci <dbl>, conf_level <dbl>

In the examples below, sigma_null is the standard deviation in the null hypothesis H0:

- alternative == "two.sided": H0: sigma == sigma_null and H1: sigma != sigma_null. Default value.
- alternative == "less": H0: sigma >= sigma_null and H1: sigma < sigma_null.
- alternative == "greater": H0: sigma <= sigma_null and H1: sigma > sigma_null.

library(tidyverse)
library(statBasics)
sigma_null <- 4
sd_pop <- 2
x <- rnorm(100, mean = 10, sd = sd_pop)
ht_1pop_var(x, sigma = sigma_null, conf_level = 0.95, alternative = "two.sided")
#> # A tibble: 2 × 10
#> statistic p_value critical_value critical_region alternative sigma sig_level
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl> <dbl>
#> 1 27.1 8.75e-14 73.4 (0,73.361)U(128… two.sided 4 0.05
#> 2 27.1 8.75e-14 128. (0,73.361)U(128… two.sided 4 0.05
#> # … with 3 more variables: lower_ci <dbl>, upper_ci <dbl>, conf_level <dbl>

In the examples below, proportion_null is the proportion in the null hypothesis H0:

- alternative == "two.sided": H0: proportion == proportion_null and H1: proportion != proportion_null. Default value.
- alternative == "less": H0: proportion >= proportion_null and H1: proportion < proportion_null.
- alternative == "greater": H0: proportion <= proportion_null and H1: proportion > proportion_null.

The following example illustrates how to perform a hypothesis test when the number of successes (in a number of trials) is a scalar.
library(tidyverse)
library(statBasics)
proportion_null <- 0.1
p0 <- 0.75
x <- rbinom(1, size = 1000, prob = p0)
ht_1pop_prop(x, 1000, proportion = p0, alternative = "two.sided", conf_level = 0.95)
#> # A tibble: 1 × 10
#> statistic p_value critical_value critical_region alternative proportion
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl>
#> 1 1.61 0.108 1.96 (-Inf,-1.960)U(1.960,… two.sided 0.75
#> # … with 4 more variables: sig_level <dbl>, lower_ci <dbl>, upper_ci <dbl>,
#> # conf_level <dbl>

The example below shows how to perform a hypothesis test when the number of successes (in a number of trials) is a vector. The vector of numbers of trials must also be provided.
library(tidyverse)
library(statBasics)
proportion_null <- 0.9
p0 <- 0.75
n <- c(10, 20, 30)
x <- n |> map_int(~ rbinom(1, .x, prob = p0))
ht_1pop_prop(x, n, proportion = p0, alternative = "less", conf_level = 0.99)
#> # A tibble: 1 × 10
#> statistic p_value critical_value critical_region alternative proportion
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl>
#> 1 -2.98 0.00143 -1.64 (-Inf,-1.645) less 0.75
#> # … with 4 more variables: sig_level <dbl>, lower_ci <dbl>, upper_ci <dbl>,
#> # conf_level <dbl>

The following example shows how to perform a hypothesis test when the data are a vector of zeroes (failures) and ones (successes).
library(tidyverse)
library(statBasics)
proportion_null <- 0.1
p0 <- 0.75
x <- rbinom(1000, 1, prob = p0)
ht_1pop_prop(x, proportion = p0, alternative = "greater", conf_level = 0.95)
#> # A tibble: 1 × 10
#> statistic p_value critical_value critical_region alternative proportion
#> <dbl> <dbl> <dbl> <chr> <chr> <dbl>
#> 1 -0.803 0.789 1.64 (1.645, Inf) greater 0.75
#> # … with 4 more variables: sig_level <dbl>, lower_ci <dbl>, upper_ci <dbl>,
#> # conf_level <dbl>

In this package, there are two approaches for computing a confidence interval for the difference in proportions.
In this case, we have the numbers of trials (n_x and n_y) and the numbers of successes (x and y) for both populations.
x <- 3
n_x <- 100
y <- 50
n_y <- 333
ci_2pop_bern(x, y, n_x, n_y)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 -0.232 -0.00840 0.95

In this case, we have vectors of 0s and 1s (x and y) for both populations.
x <- rbinom(100, 1, 0.75)
y <- rbinom(500, 1, 0.75)
ci_2pop_bern(x, y)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 -0.119 0.0954 0.95

In this case, we can build a confidence interval for the difference in means of two populations, with known or unknown standard deviations, and a confidence interval for the ratio of the variances of the two populations.
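For reference, when the standard deviations are known the interval has the usual z form, (xbar - ybar) ± z_{alpha/2} * sqrt(sd_1^2 / n_x + sd_2^2 / n_y); a minimal base-R sketch with the same parameters as the examples below:
x <- rnorm(1000, mean = 0, sd = 2)
y <- rnorm(1000, mean = 0, sd = 3)
# known sigmas (2 and 3): z-based interval for the difference of means
(mean(x) - mean(y)) + c(-1, 1) * qnorm(0.975) * sqrt(2^2 / 1000 + 3^2 / 1000)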
Next, we illustrate how to compute a confidence interval for the difference in means of two populations when population standard deviations are unknown.
x <- rnorm(1000, mean = 0, sd = 2)
y <- rnorm(1000, mean = 0, sd = 1)
# unknown variance and confidence interval for difference of means
ci_2pop_norm(x, y)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 -0.178 0.0962 0.95

The example below illustrates how to obtain a confidence interval for the difference in means of two populations when population standard deviations are known.
x <- rnorm(1000, mean = 0, sd = 2)
y <- rnorm(1000, mean = 0, sd = 3)
# known variance and confidence interval for difference of means
ci_2pop_norm(x, y, sd_pop_1 = 2, sd_pop_2 = 3)
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 -0.309 0.138 0.95

In this case, a confidence interval is obtained for the ratio of the variances (or standard deviations) of the two populations.
x <- rnorm(1000, mean = 0, sd = 2)
y <- rnorm(1000, mean = 0, sd = 3)
# confidence interval for the variance ratio of 2 populations
ci_2pop_norm(x, y, parameter = "variance")
#> # A tibble: 1 × 3
#> lower_ci upper_ci conf_level
#> <dbl> <dbl> <dbl>
#> 1 0.360 0.461 0.95

There are two approaches to compare the proportions of two populations:

- the numbers of successes (x and y) and the numbers of trials (n_x and n_y) are given for both populations;
- a vector of 1s (successes) and 0s (failures) is given for each population.

In the example below, we have a vector of 1 (success) and 0 (failure) for both populations.
x <- rbinom(100, 1, 0.75)
y <- rbinom(500, 1, 0.75)
ht_2pop_prop(x, y)
#> [1] FALSE
#> # A tibble: 1 × 6
#> statistic p_value critical_value critical_region delta alternative
#> <dbl> <dbl> <dbl> <chr> <dbl> <chr>
#> 1 1.23 0.220 1.96 (-Inf,-1.960)U(1.960,Inf) 0 two.sided

In this case, we have the numbers of successes and the numbers of trials for both populations.
x <- 3
n_x <- 100
y <- 50
n_y <- 333
ht_2pop_prop(x, y, n_x, n_y)
#> [1] FALSE
#> # A tibble: 1 × 6
#> statistic p_value critical_value critical_region delta alternative
#> <dbl> <dbl> <dbl> <chr> <dbl> <chr>
#> 1 -3.21 0.00131 1.96 (-Inf,-1.960)U(1.960,Inf) 0 two.sided

There are three cases to be considered when comparing two means:

- unknown but equal variances (t-test);
- unknown and unequal variances (t-test);
- known variances (z-test).

Unknown but equal variances (t-test):
x <- rnorm(1000, mean = 10, sd = 2)
y <- rnorm(500, mean = 5, sd = 2)
# H0: mu_1 - mu_2 == -1 versus H1: mu_1 - mu_2 != -1
ht_2pop_mean(x, y, delta = -1, var_equal = TRUE)
#> # A tibble: 1 × 6
#> statistic p_value critical_value critical_region delta alternative
#> <dbl> <dbl> <dbl> <chr> <dbl> <chr>
#> 1 54.3 0 1.96 (-Inf,-1.962)U(1.962, Inf) -1 two.sided

Unknown and unequal variances (t-test):
x <- rnorm(1000, mean = 10, sd = 2)
y <- rnorm(500, mean = 5, sd = 1)
# H0: mu_1 - mu_2 == -1 versus H1: mu_1 - mu_2 != -1
ht_2pop_mean(x, y, delta = -1)
#> # A tibble: 1 × 6
#> statistic p_value critical_value critical_region delta alternative
#> <dbl> <dbl> <dbl> <chr> <dbl> <chr>
#> 1 77.8 0 1.96 (-Inf,-1.962)U(1.962, Inf) -1 two.sided

Known variances (z-test):
x <- rnorm(1000, mean = 10, sd = 3)
y <- rnorm(500, mean = 5, sd = 1)
# H0: mu_1 - mu_2 >= 0 versus H1: mu_1 - mu_2 < 0
ht_2pop_mean(x, y, delta = 0, sd_pop_1 = 3, sd_pop_2 = 1, alternative = "less")
#> # A tibble: 1 × 6
#> statistic p_value critical_value critical_region delta alternative
#> <dbl> <dbl> <dbl> <chr> <dbl> <chr>
#> 1 -0.368 0.357 -1.64 (-Inf, -1.645) 0 less

Finally, ht_2pop_var compares the variances of two populations (by default, H0: the variance ratio equals 1):
x <- rnorm(100, sd = 2)
y <- rnorm(1000, sd = 10)
ht_2pop_var(x, y)
#> # A tibble: 2 × 7
#> statistic p_value critical_vale ratio alternative lower_ci upper_ci
#> <dbl> <dbl> <dbl> <dbl> <chr> <dbl> <dbl>
#> 1 0.0532 1.86e-43 0.733 1 two.sided 0.0535 0.0535
#> 2 0.0532 1.86e-43 1.32 1 two.sided 0.0535 0.0535
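In all the tests above, the returned tibble carries everything needed for the decision: reject H0 when the statistic falls in critical_region or, for the one-population tests, when p_value is below sig_level. A small sketch with ht_1pop_mean:
library(statBasics)
x <- rnorm(100, mean = 10, sd = 2)
res <- ht_1pop_mean(x, mu = 9, conf_level = 0.95, alternative = "two.sided")
# reject H0: mu == 9 at the 5% significance level?
res$p_value < res$sig_level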