% project_references.bibtex
% Reference database for the Weight Lifting Exercises activity-recognition project.
% Text outside @...{...} entries is ignored by BibTeX, so these header lines are safe.
@inproceedings{Velloso2013,
  author    = {Velloso, Eduardo and Bulling, Andreas and Gellersen, Hans and Ugulino, Wallace and Fuks, Hugo},
  title     = {Qualitative Activity Recognition of Weight Lifting Exercises},
  booktitle = {Proceedings of the 4th Augmented Human International Conference},
  series    = {AH '13},
  year      = {2013},
  isbn      = {978-1-4503-1904-1},
  location  = {Stuttgart, Germany},
  pages     = {116--123},
  numpages  = {8},
  doi       = {10.1145/2459236.2459256},
  url       = {https://doi.org/10.1145/2459236.2459256},
  acmid     = {2459256},
  publisher = {ACM},
  address   = {New York, NY, USA},
  keywords  = {qualitative activity recognition, real-time user feedback, weight lifting},
  abstract  = {Research on activity recognition has traditionally focused on discriminating between different activities, i.e. to predict which activity was performed at a specific point in time. The quality of executing an activity, the how (well), has only received little attention so far, even though it potentially provides useful information for a large variety of applications. In this work we define quality of execution and investigate three aspects that pertain to qualitative activity recognition: specifying correct execution, detecting execution mistakes, providing feedback to the user. We illustrate our approach on the example problem of qualitatively assessing and providing feedback on weight lifting exercises. In two user studies we try out a sensor- and a model-based approach to qualitative activity recognition. Our results underline the potential of model-based assessment and the positive impact of real-time user feedback on the quality of execution.},
}
@book{elements,
  author    = {Hastie, Trevor and Tibshirani, Robert and Friedman, Jerome},
  title     = {The Elements of Statistical Learning},
  series    = {Springer Series in Statistics},
  publisher = {Springer New York Inc.},
  address   = {New York, NY, USA},
  year      = {2001},
}
@article{Bylander2002,
  author   = {Bylander, Tom},
  title    = {Estimating Generalization Error on Two-Class Datasets Using Out-of-Bag Estimates},
  journal  = {Machine Learning},
  year     = {2002},
  volume   = {48},
  number   = {1},
  pages    = {287--297},
  issn     = {1573-0565},
  doi      = {10.1023/A:1013964023376},
  url      = {https://doi.org/10.1023/A:1013964023376},
  abstract = {For two-class datasets, we provide a method for estimating the generalization error of a bag using out-of-bag estimates. In bagging, each predictor (single hypothesis) is learned from a bootstrap sample of the training examples; the output of a bag (a set of predictors) on an example is determined by voting. The out-of-bag estimate is based on recording the votes of each predictor on those training examples omitted from its bootstrap sample. Because no additional predictors are generated, the out-of-bag estimate requires considerably less time than 10-fold cross-validation. We address the question of how to use the out-of-bag estimate to estimate generalization error on two-class datasets. Our experiments on several datasets show that the out-of-bag estimate and 10-fold cross-validation have similar performance, but are both biased. We can eliminate most of the bias in the out-of-bag estimate and increase accuracy by incorporating a correction based on the distribution of the out-of-bag votes.},
}
@article{Breiman2001,
  author   = {Breiman, Leo},
  title    = {Random Forests},
  journal  = {Machine Learning},
  year     = {2001},
  volume   = {45},
  number   = {1},
  pages    = {5--32},
  issn     = {1573-0565},
  doi      = {10.1023/A:1010933404324},
  url      = {https://doi.org/10.1023/A:1010933404324},
  abstract = {Random forests are a combination of tree predictors such that each tree depends on the values of a random vector sampled independently and with the same distribution for all trees in the forest. The generalization error for forests converges a.s. to a limit as the number of trees in the forest becomes large. The generalization error of a forest of tree classifiers depends on the strength of the individual trees in the forest and the correlation between them. Using a random selection of features to split each node yields error rates that compare favorably to Adaboost (Y. Freund {\&} R. Schapire, Machine Learning: Proceedings of the Thirteenth International conference, ***, 148--156), but are more robust with respect to noise. Internal estimates monitor error, strength, and correlation and these are used to show the response to increasing the number of features used in the splitting. Internal estimates are also used to measure variable importance. These ideas are also applicable to regression.},
}
@techreport{Tibshirani96bias-variance,
  author      = {Tibshirani, Robert},
  title       = {Bias, Variance and Prediction Error for Classification Rules},
  institution = {University of Toronto Department of Statistics},
  year        = {1996},
}
@techreport{Statistics_out-of-bagestimation,
  author      = {Breiman, Leo},
  title       = {Out-Of-Bag Estimation},
  institution = {UC Berkeley Department of Statistics},
  year        = {1996},
}