Obs,Financial Condition,TotCap/Assets,TotExp/Assets,TotLns&Lses/Assets 1,1,9.7,0.12,0.65 2,1,1,0.11,0.62 3,1,6.9,0.09,1.02 4,1,5.8,0.1,0.67 5,1,4.3,0.11,0.69 6,1,9.1,0.13,0.74 7,1,11.9,0.1,0.79...

1 answer below »
attached the book, homework word file and datasets. R script along with word document is required for homework submission. Thanks


Obs,Financial Condition,TotCap/Assets,TotExp/Assets,TotLns&Lses/Assets 1,1,9.7,0.12,0.65 2,1,1,0.11,0.62 3,1,6.9,0.09,1.02 4,1,5.8,0.1,0.67 5,1,4.3,0.11,0.69 6,1,9.1,0.13,0.74 7,1,11.9,0.1,0.79 8,1,8.1,0.13,0.63 9,1,9.3,0.16,0.72 10,1,1.1,0.16,0.57 11,0,11.1,0.08,0.43 12,0,20.5,0.12,0.8 13,0,9.8,0.07,0.69 14,0,7.9,0.08,0.53 15,0,9.6,0.09,0.73 16,0,12.5,0.09,0.3 17,0,18.3,0.08,0.49 18,0,7.2,0.11,0.55 19,0,14,0.08,0.44 20,0,8.3,0.08,0.51 CRIM,ZN,INDUS,CHAS,NOX,RM,AGE,DIS,RAD,TAX,PTRATIO,LSTAT,MEDV,CAT. MEDV 0.00632,18,2.31,0,0.538,6.575,65.2,4.09,1,296,15.3,4.98,24,0 0.02731,0,7.07,0,0.469,6.421,78.9,4.9671,2,242,17.8,9.14,21.6,0 0.02729,0,7.07,0,0.469,7.185,61.1,4.9671,2,242,17.8,4.03,34.7,1 0.03237,0,2.18,0,0.458,6.998,45.8,6.0622,3,222,18.7,2.94,33.4,1 0.06905,0,2.18,0,0.458,7.147,54.2,6.0622,3,222,18.7,5.33,36.2,1 0.02985,0,2.18,0,0.458,6.43,58.7,6.0622,3,222,18.7,5.21,28.7,0 0.08829,12.5,7.87,0,0.524,6.012,66.6,5.5605,5,311,15.2,12.43,22.9,0 0.14455,12.5,7.87,0,0.524,6.172,96.1,5.9505,5,311,15.2,19.15,27.1,0 0.21124,12.5,7.87,0,0.524,5.631,100,6.0821,5,311,15.2,29.93,16.5,0 0.17004,12.5,7.87,0,0.524,6.004,85.9,6.5921,5,311,15.2,17.1,18.9,0 0.22489,12.5,7.87,0,0.524,6.377,94.3,6.3467,5,311,15.2,20.45,15,0 0.11747,12.5,7.87,0,0.524,6.009,82.9,6.2267,5,311,15.2,13.27,18.9,0 0.09378,12.5,7.87,0,0.524,5.889,39,5.4509,5,311,15.2,15.71,21.7,0 0.62976,0,8.14,0,0.538,5.949,61.8,4.7075,4,307,21,8.26,20.4,0 0.63796,0,8.14,0,0.538,6.096,84.5,4.4619,4,307,21,10.26,18.2,0 0.62739,0,8.14,0,0.538,5.834,56.5,4.4986,4,307,21,8.47,19.9,0 1.05393,0,8.14,0,0.538,5.935,29.3,4.4986,4,307,21,6.58,23.1,0 0.7842,0,8.14,0,0.538,5.99,81.7,4.2579,4,307,21,14.67,17.5,0 0.80271,0,8.14,0,0.538,5.456,36.6,3.7965,4,307,21,11.69,20.2,0 0.7258,0,8.14,0,0.538,5.727,69.5,3.7965,4,307,21,11.28,18.2,0 1.25179,0,8.14,0,0.538,5.57,98.1,3.7979,4,307,21,21.02,13.6,0 0.85204,0,8.14,0,0.538,5.965,89.2,4.0123,4,307,21,13.83,19.6,0 1.23247,0,8.14,0,0.538,6.142,91.7,3.9769,4,307,21,18.72,15.2,0 
0.98843,0,8.14,0,0.538,5.813,100,4.0952,4,307,21,19.88,14.5,0 0.75026,0,8.14,0,0.538,5.924,94.1,4.3996,4,307,21,16.3,15.6,0 0.84054,0,8.14,0,0.538,5.599,85.7,4.4546,4,307,21,16.51,13.9,0 0.67191,0,8.14,0,0.538,5.813,90.3,4.682,4,307,21,14.81,16.6,0 0.95577,0,8.14,0,0.538,6.047,88.8,4.4534,4,307,21,17.28,14.8,0 0.77299,0,8.14,0,0.538,6.495,94.4,4.4547,4,307,21,12.8,18.4,0 1.00245,0,8.14,0,0.538,6.674,87.3,4.239,4,307,21,11.98,21,0 1.13081,0,8.14,0,0.538,5.713,94.1,4.233,4,307,21,22.6,12.7,0 1.35472,0,8.14,0,0.538,6.072,100,4.175,4,307,21,13.04,14.5,0 1.38799,0,8.14,0,0.538,5.95,82,3.99,4,307,21,27.71,13.2,0 1.15172,0,8.14,0,0.538,5.701,95,3.7872,4,307,21,18.35,13.1,0 1.61282,0,8.14,0,0.538,6.096,96.9,3.7598,4,307,21,20.34,13.5,0 0.06417,0,5.96,0,0.499,5.933,68.2,3.3603,5,279,19.2,9.68,18.9,0 0.09744,0,5.96,0,0.499,5.841,61.4,3.3779,5,279,19.2,11.41,20,0 0.08014,0,5.96,0,0.499,5.85,41.5,3.9342,5,279,19.2,8.77,21,0 0.17505,0,5.96,0,0.499,5.966,30.2,3.8473,5,279,19.2,10.13,24.7,0 0.02763,75,2.95,0,0.428,6.595,21.8,5.4011,3,252,18.3,4.32,30.8,1 0.03359,75,2.95,0,0.428,7.024,15.8,5.4011,3,252,18.3,1.98,34.9,1 0.12744,0,6.91,0,0.448,6.77,2.9,5.7209,3,233,17.9,4.84,26.6,0 0.1415,0,6.91,0,0.448,6.169,6.6,5.7209,3,233,17.9,5.81,25.3,0 0.15936,0,6.91,0,0.448,6.211,6.5,5.7209,3,233,17.9,7.44,24.7,0 0.12269,0,6.91,0,0.448,6.069,40,5.7209,3,233,17.9,9.55,21.2,0 0.17142,0,6.91,0,0.448,5.682,33.8,5.1004,3,233,17.9,10.21,19.3,0 0.18836,0,6.91,0,0.448,5.786,33.3,5.1004,3,233,17.9,14.15,20,0 0.22927,0,6.91,0,0.448,6.03,85.5,5.6894,3,233,17.9,18.8,16.6,0 0.25387,0,6.91,0,0.448,5.399,95.3,5.87,3,233,17.9,30.81,14.4,0 0.21977,0,6.91,0,0.448,5.602,62,6.0877,3,233,17.9,16.2,19.4,0 0.08873,21,5.64,0,0.439,5.963,45.7,6.8147,4,243,16.8,13.45,19.7,0 0.04337,21,5.64,0,0.439,6.115,63,6.8147,4,243,16.8,9.43,20.5,0 0.0536,21,5.64,0,0.439,6.511,21.1,6.8147,4,243,16.8,5.28,25,0 0.04981,21,5.64,0,0.439,5.998,21.4,6.8147,4,243,16.8,8.43,23.4,0 
0.0136,75,4,0,0.41,5.888,47.6,7.3197,3,469,21.1,14.8,18.9,0 0.01311,90,1.22,0,0.403,7.249,21.9,8.6966,5,226,17.9,4.81,35.4,1 0.02055,85,0.74,0,0.41,6.383,35.7,9.1876,2,313,17.3,5.77,24.7,0 0.01432,100,1.32,0,0.411,6.816,40.5,8.3248,5,256,15.1,3.95,31.6,1 0.15445,25,5.13,0,0.453,6.145,29.2,7.8148,8,284,19.7,6.86,23.3,0 0.10328,25,5.13,0,0.453,5.927,47.2,6.932,8,284,19.7,9.22,19.6,0 0.14932,25,5.13,0,0.453,5.741,66.2,7.2254,8,284,19.7,13.15,18.7,0 0.17171,25,5.13,0,0.453,5.966,93.4,6.8185,8,284,19.7,14.44,16,0 0.11027,25,5.13,0,0.453,6.456,67.8,7.2255,8,284,19.7,6.73,22.2,0 0.1265,25,5.13,0,0.453,6.762,43.4,7.9809,8,284,19.7,9.5,25,0 0.01951,17.5,1.38,0,0.4161,7.104,59.5,9.2229,3,216,18.6,8.05,33,1 0.03584,80,3.37,0,0.398,6.29,17.8,6.6115,4,337,16.1,4.67,23.5,0 0.04379,80,3.37,0,0.398,5.787,31.1,6.6115,4,337,16.1,10.24,19.4,0 0.05789,12.5,6.07,0,0.409,5.878,21.4,6.498,4,345,18.9,8.1,22,0 0.13554,12.5,6.07,0,0.409,5.594,36.8,6.498,4,345,18.9,13.09,17.4,0 0.12816,12.5,6.07,0,0.409,5.885,33,6.498,4,345,18.9,8.79,20.9,0 0.08826,0,10.81,0,0.413,6.417,6.6,5.2873,4,305,19.2,6.72,24.2,0 0.15876,0,10.81,0,0.413,5.961,17.5,5.2873,4,305,19.2,9.88,21.7,0 0.09164,0,10.81,0,0.413,6.065,7.8,5.2873,4,305,19.2,5.52,22.8,0 0.19539,0,10.81,0,0.413,6.245,6.2,5.2873,4,305,19.2,7.54,23.4,0 0.07896,0,12.83,0,0.437,6.273,6,4.2515,5,398,18.7,6.78,24.1,0 0.09512,0,12.83,0,0.437,6.286,45,4.5026,5,398,18.7,8.94,21.4,0 0.10153,0,12.83,0,0.437,6.279,74.5,4.0522,5,398,18.7,11.97,20,0 0.08707,0,12.83,0,0.437,6.14,45.8,4.0905,5,398,18.7,10.27,20.8,0 0.05646,0,12.83,0,0.437,6.232,53.7,5.0141,5,398,18.7,12.34,21.2,0 0.08387,0,12.83,0,0.437,5.874,36.6,4.5026,5,398,18.7,9.1,20.3,0 0.04113,25,4.86,0,0.426,6.727,33.5,5.4007
Answered Same Day: Mar 31, 2021

Answer To: Obs,Financial Condition,TotCap/Assets,TotExp/Assets,TotLns&Lses/Assets 1,1,9.7,0.12,0.65...

Bezawada Arun answered on Apr 03 2021
138 Votes
Please attach appropriate graphs to questions (if any), and include R script with word document in your submission
5.1: A data mining routine has been applied to a transaction dataset and has classified 88
records as fraudulent (30 correctly so) and 952 as non-fraudulent (920 correctly so).
Construct the confusion matrix and calculate the overall error rate
A) #Creating a confusion matrix
Let us assume that fraud = 1 and non-fraud = 0
The dataset classified 88 records, of which 30 of the transactions are fraud and prediction is also true which gives “True Positive”
The remaining 58 are misclassified as fraud, which means they are actually non fraud but predicted as fraud     which gives “False Positive”
Also 952 are non-fraudulent and 920 of them predicted correctly, which means 920 is our “True Negative”
The remaining 32 cases out of the 952 classified as non-fraudulent are misclassified — they are actually fraud but were predicted as non-fraud — which gives “False Negative”
#Creating matrix
matrix(data=c(920,32,58,30), nrow=2, ncol=2, byrow=TRUE, dimnames= NULL)
In short TP=30, TN=920, FP=58, FN=32
Total= 30+920+32+58 = 1040
Error=(FP+FN)/Total = (32+58)/1040 = 0.0865
#R code
matrix(data=c(920,32,58,30), nrow=2, ncol=2, byrow=TRUE, dimnames= NULL)
[,1] [,2]
[1,] 920 32
[2,] 58 30
> print(matrix)
function (data = NA, nrow = 1, ncol = 1, byrow = FALSE, dimnames = NULL)
{
if (is.object(data) || !is.atomic(data))
data <- as.vector(data)
.Internal(matrix(data, nrow, ncol, byrow, dimnames, missing(nrow),
missing(ncol)))
}


> TP=30
> FN=32
> FP=58
> TN=920
> total=TP+FN+FP+TN
> print(total)
[1] 1040
> #calculation of error rate
> error= (FP+FN)/total
> print(error)
[1] 0.08653846
5.4: Consider Figure 5.12, the decile-wise lift chart for the transaction data model, applied
to new data.
FIGURE 5.12 DECILE-WISE LIFT CHART FOR TRANSACTION DATA
a. Interpret the meaning of the first and second bars from the left.
A) In a Decile- lift chart regarding the transaction data model, the top 10% of responses contain the highest predicted values are contained in (decile1). In this case, it is noted that approximately 65% of the overall responses are listed in top 10% which are enumerated correctly. The second decile consists of 26% of the overall responses which are enumerated in next top 10%. In a cumulative lift chart, we take the cumulative percent value for the combined deciles.
b. Explain how you might use this information in practice.
A) In practice, using decile charts we can evaluate the performance of the model. These charts can measure how much we can expect with a predictive modelling. We can know the flaws and proceed for improvements to make more responses appear in the top 10%.
c. Another analyst comments that you could improve the accuracy of the model by
classifying everything as nonfraudulent. If you do that, what is the error rate?
A) If every record is classified as non-fraudulent, there are no false positives; the only errors are the records that are actually fraudulent, namely TP + FN = 30 + 32 = 62.
Error rate = 62/Total = 62/1040 ≈ 0.0596, which is indeed lower than the model's error rate of 0.0865 — even though the classifier is useless for catching fraud.
d. Comment on the usefulness, in this situation, of these two metrics of model performance
(error rate and lift).
A) a) Evaluating a model tells us how accurate and powerful it is. Error rate is one of the metrics that describes how often the model misclassifies; it differs between training and test data. The lower the error rate, the better the model — but as part (c) shows, a low error rate can be misleading when one class is rare.
b) Lift is a metric that measures the effectiveness of the model which produces result in comparison to the results obtained using the model and without using the model. In this situation we can make use of lift to evaluate the model effectiveness and know the number of responses.
5.7: Table 5.7 shows a small set of predictive models
validation results for a classification
model, with both actual values and propensities.
a. Calculate error rates, sensitivity, and specificity using cutoffs of 0.25, 0.5, and 0.75.
A) R code
#creating a matrix from the table 5.7 data
> table <- matrix(c(.03,0,.52,0,.38,0,.82,1,.33,0,.42,0,.55,1,0.59,0,.09,
+ 0,.21,0,.43,0,.04,0,.08,0,.13,0,.01,0,.79,1,.42,0,.29,0,.08,0,.02,0),ncol=2, byrow=TRUE)
> #The above method is preferable because converting it into dataframe is long process
> #Assigning the column names
> colnames(table)<-c("Propensity of 1","Actual")
> #Printing the table
> table <- as.data.frame(table)
> print(table)
Propensity of 1 Actual
1 0.03 0
2 0.52 0
3 0.38 0
4 0.82 1
5 0.33 0
6 0.42 0
7 0.55 1
8 0.59 0
9 0.09 0
10 0.21 0
11 0.43 0
12 0.04 0
13 0.08 0
14 0.13 0
15 0.01 0
16 0.79 1
17 0.42 0
18 0.29 0
19 0.08 0
20 0.02 0
> #Calculating error rates, sensitivity and specificity
> #installing the necessary packages
> library(caret)
> #loading the lattice package
> library(e1071)
> #Confusion matrix
> confusionMatrix(as.factor(ifelse(table$`Propensity of 1`>0.25,'1','0')),
+ as.factor(table$Actual))
Confusion Matrix and Statistics
Reference
Prediction 0 1
0 9 0
1 8 3

Accuracy : 0.6
95% CI : (0.3605, 0.8088)
No Information Rate : 0.85
P-Value [Acc > NIR] : 0.99867

Kappa : 0.2523

Mcnemar's Test P-Value : 0.01333

Sensitivity : 0.5294
Specificity : 1.0000
Pos Pred Value : 1.0000
Neg Pred Value : 0.2727
Prevalence : 0.8500
Detection Rate : 0.4500
Detection Prevalence : 0.4500
Balanced Accuracy : 0.7647

'Positive' Class : 0

> #Confusion matrix, when cutoff=0.5
> confusionMatrix(as.factor(ifelse(table$`Propensity of 1`>0.5,'1','0')),
+ as.factor(table$Actual))
Confusion Matrix and Statistics
Reference
Prediction 0 1
0 15 0
1 2 3

Accuracy : 0.9
95% CI : (0.683, 0.9877)
No Information Rate : 0.85
P-Value [Acc > NIR] : 0.4049

Kappa : 0.6923

Mcnemar's Test P-Value : 0.4795

Sensitivity : 0.8824
Specificity : 1.0000
Pos Pred Value : 1.0000
Neg Pred Value : 0.6000
Prevalence : 0.8500
Detection Rate : 0.7500
Detection Prevalence : 0.7500
Balanced Accuracy : 0.9412

'Positive' Class : 0

> #Confusion matrix, when cutoff=0.75
> confusionMatrix(as.factor(ifelse(table$`Propensity of 1`>0.75,'1','0')),
+ as.factor(table$Actual))
Confusion Matrix and Statistics
Reference
Prediction 0 1
0 17 1
1 0 2

Accuracy : 0.95
95% CI : (0.7513, 0.9987)
No Information Rate : 0.85
P-Value [Acc > NIR] : 0.1756

Kappa : 0.7727

Mcnemar's Test P-Value : 1.0000

Sensitivity : 1.0000
Specificity : 0.6667
Pos Pred Value : 0.9444
Neg Pred Value : 1.0000
Prevalence : 0.8500
Detection Rate : 0.8500
Detection Prevalence : 0.9000
Balanced Accuracy : 0.8333

'Positive' Class : 0
b. Create a decile-wise lift chart in R.
A) A decile-wise lift chart orders the records by predicted propensity and divides them into ten equal groups (deciles); each bar shows how concentrated the actual responses are in that decile relative to a random selection, so a tall first bar means the top 10% of scored records captures many times its share of responses.
#Creating a decile wise lift chart
> #Installing the necessary packages
> library(gains)
> gain<- gains(table$Actual, table$`Propensity of 1`)
> #plotting the barplot within decile chart
> barplot(gain$mean.resp/mean(table$Actual), names.arg=gain$depth,
+ xlab = "Percentile",ylab = "Avg Response", main = "Decile-wise lift chart")
6.1: Predicting Boston Housing Prices. The file BostonHousing.csv contains information
collected by the US Bureau of the Census concerning housing in the area of
Boston, Massachusetts. The dataset includes information on 506 census housing tracts
in the Boston area. The goal is to predict the median house price in new tracts based
on information such as crime rate, pollution, and number of rooms. The dataset contains
13 predictors, and the response is the median house price (MEDV). Table 6.9
describes each of the predictors and the response.
a. Why should the data be partitioned into training and validation sets? What will the
training set be used for? What will the validation set be used for?
A) a) The data is partitioned so that the model's performance can be measured on records it has never seen; measuring on the same data used for fitting gives an over-optimistic (overfit) estimate. The training set — the records with known outcomes that the algorithm learns from — is used to fit the model, i.e., to estimate its parameters.
b) The validation set is a subset held out from training. It is used to assess how well the fitted model predicts new, unseen data and to compare and select among candidate models before deployment.
b. Fit a multiple linear regression model to the median house price (MEDV) as a
function of CRIM, CHAS, and RM. Write the equation for predicting the median
house price from the predictors in the model.
A) #-------------Question 6.1------------------------
> # importing the boston dataset
> boston <- read.csv("bostonhousing-li41frau.csv", sep=",")
> model1 <- lm(boston$MEDV~boston$CRIM+boston$CHAS+boston$RM)
> model1
Call:
lm(formula = boston$MEDV ~ boston$CRIM + boston$CHAS + boston$RM)
Coefficients:
(Intercept) boston$CRIM boston$CHAS boston$RM
-28.8107 -0.2607 3.7630 8.2782
> fit.lm <- lm(MEDV~CRIM+CHAS+RM, data=boston)
> summary(fit.lm)
Call:
lm(formula = MEDV ~ CRIM + CHAS + RM, data = boston)
Residuals:
Min 1Q Median 3Q Max
-24.829 -2.968 -0.415 2.433 38.945
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -28.81068 2.56331 -11.240< 2e-16 ***
CRIM -0.26072 0.03274 -7.964 1.12e-14 ***
CHAS 3.76304 1.08620 3.464 0.000577 ***
RM 8.27818 0.40182 20.602 < 2e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Residual standard error: 6.17 on 502 degrees of freedom
Multiple R-squared: 0.5527,    Adjusted R-squared: 0.55
F-statistic: 206.7 on 3 and 502 DF, p-value: < 2.2e-16
> fit.lm$fitted.values
1 2 3 4 5 6
25.61670198 24.33638968 30.66092427 29.11158017 30.33546559 24.41023106
7 8 9 10 11 12
20.93471516 22.24455558 17.74867259 20.84717550 23.92063584 20.90227268
13 14 15 16 17 18
19.91506767 20.27201539 21.48676988 19.32064263 20.04552940 20.57115448
19 20 21 22 23 24
16.14578045 18.40921950 16.97240683 20.34651244 21.71256288 19.05266891
25 26 27 28 29 30
20.03364360 17.31969696 19.13519340 20.99827824 24.75455801 26.17652637
31 32 33 34 35 36
18.18772899 21.10121673 20.08260450 18.08293908 21.23260008 20.28702763
37 38 39 40 41 42
19.51676079 19.59577494 20.53129844 25.77670954 29.32649476 27.19936810
43 44 45 46 47 48
22.22051625 22.56354326 21.39760249 18.18124180 19.03775583 21.04696547
49 50 51 52 53 54
15.81702019 17.50638139 20.52896963 21.79907942 25.07457142 20.82885332
55 56 57 58 59 60
19.92769437 31.19442485 24.02358134 27.60965752 22.01846355 20.22716162
61 62 63 64 65 66
18.67541642 20.53216926 24.60449628 27.13338774 29.99242014 23.24972415
67 68 69 70 71 72
19.08372694 19.83336509 17.46211677 19.87299124 24.28738581 20.49415474
73 74 75 76 77 78
21.37258527 22.83560747 23.09775265 23.20115568 23.14153718 21.99464026
79 80 81 82 83 84
22.76421358 19.79347875 26.86590949 25.97095614 23.34886676 22.23159407
85 86 87 88 89 90
24.06541826 26.05869710 20.96904268 21.84141172 29.17976643 29.77672877
91 92 93 94 95 96
24.29818501 24.20080750 24.50639359 22.59759648 22.90846763 26.00043994
97 98 99 100 101 102
22.17774594 37.95444706 35.90333811 32.56241328 26.83787379 27.29384878
103 104 105 106 107 108
24.15141587 21.93733510 22.20445526 19.59037030 19.45613885 21.87552598
109 110 111 112 113 114
24.74887565 22.68536476 22.44450144 26.75100348 20.10605001 21.56207680
115 116 117 118 119 120
22.92395034 20.21769490 22.28104989 20.99287397 19.76474395 18.59382353
121 122 123 124 125 126
19.76424561 20.87282818 20.51130259 19.62713073 19.83105786 20.69843421
127 128 129 130 131 132
17.55374918 18.24942843 24.34144432 17.62365370 24.56114077 23.24605440
133 134 135 136 137 138
23.78403881 19.29888823 18.59228732 23.48615974 20.29414181 24.52482897
139 140 141 142 143 144
19.60948769 21.96643186 22.22295492 12.31287576 18.81348126 15.38611250
145 146 147 148 149 150
11.05248703 21.31420772 17.21703933 11.35007419 13.51221200 16.80947718
151 152 153 154 155 156
21.43641824 15.53447404 16.14886485 17.88910235 25.32069340 24.95805335
157 158 159 160 161 162
14.19397226 28.34570275 21.05464506 24.70873057 26.35895626 32.80307243
163 164 165 166 167 168
39.06060483 43.88606487 19.06514412 20.93213435 36.30289962 19.37080330
169 170 171 172 173 174
22.89936529 23.54757238 19.50882002 19.26172457 17.27905821 24.27818988
175 176 177 178 179 180
19.66914962 25.36090787 21.00565189 23.45187870 27.96031369 28.95594271
181 182 183 184 185 186
35.45220721 22.03249556 30.39596030 25.49291830 17.55857617 22.10919187
187 188 189 190 191 192
36.00113782 27.31140093 25.42826781 30.64621681 28.70730287 26.95795258
193 194 195 196 197 198
30.58750302 27.47523817 25.85466514 36.37638291 31.50195612 30.01017601
199 200 201 202 203 204
31.39497335 28.92140886 30.24949477 22.19047954 34.18058989 36.18871213
205 206 207 208 209 210
37.69097615 19.92050674 23.49719719 18.99633140 25.11581230 19.07734723
211 212 213 214 215 216
24.24482024 19.58966323 22.96711798 23.92607680 15.91533388 22.31339644
217 218 219 220 221 222
23.68238824 26.15470320 24.18694302 27.67940672 32.40061961 25.87275495
223 224 225 226 227 228
31.73537616 25.81404419 39.53453759 43.27905284 37.64624995 30.37840195
229 230 231 232 233 234
34.73766211 25.31276879 20.56110194 32.42648128 40.05451044 39.37304408
235 236 237 238 239 240
30.51461089 21.48416344 29.70923696 31.96671797 24.81870673 25.85085111
241 242 243 244 245 246
28.25438619 21.61715537 23.79515619 24.07846042 17.43544709 17.53863094
247 248 249 250 251 252
21.66383781 22.67801440 24.39998774 26.75240150 24.85329030 24.42842063
253 254 255 256 257 258
28.75918029 39.46261289 21.73987547 19.82265156 32.89085987 43.08315117
259 260 261 262 263 264
31.72021680 27.65741908 30.67622681 33.30197155 40.57385836 31.62837555
265 266 267 268 269 270
30.69846454 17.01742432 29.04761952 39.72258803 32.88639914 23.93554436
271 272 273 274 275 276
19.58834016 22.80289348 25.28217808 38.56198594 30.88157843 27.90292195
277 278 279 280 281 282
35.08259200 31.44323535 24.82767844 27.52542717 35.91535490 28.86201458
283 284 285 286 287 288
38.22305941 40.53645972 29.86269383 24.60555428 22.75725448 22.57844330
289 290 291 292 293 294
23.45405575 25.52436463 27.97677861 30.34118606 26.06422446 21.88817633
295 296 297 298 299 300
20.91152319 26.43728539 25.38911096...
SOLUTION.PDF

Answer To This Question Is Available To Download

Related Questions & Answers

More Questions »

Submit New Assignment

Copy and Paste Your Assignment Here