"""
This module contains non-incremental (batch or full-gradient) functions.
"""

import math

import victor_utils


def grad_l2(w, cur_grad, x, y):
    """
    Accumulates the squared-loss (l2) gradient of one example into cur_grad.

    @type w: vector
    @param w: model vector
    @type cur_grad: vector
    @param cur_grad: current gradient accumulator
    @type x: vector
    @param x: feature vector of the example
    @type y: number
    @param y: label of the example
    """
    wx = victor_utils.dot(w, x)
    err = wx - y
    # gradient of (wx - y)^2 / 2 with respect to w is (wx - y) * x
    victor_utils.scale_and_add_i(cur_grad, x, err)


def step_l2_project(w, grad, stepsize, mu, B):
    """
    Takes a gradient step on w, then projects w back onto the l2 ball of
    radius B.

    @type w: vector
    @param w: model vector
    @type grad: vector
    @param grad: gradient
    @type stepsize: double
    @param stepsize: step size
    @type mu: double
    @param mu: regularization parameter (unused by this step)
    @type B: double
    @param B: radius of the l2 ball to project onto
    """
    victor_utils.scale_and_add_i(w, grad, -stepsize)
    victor_utils.l2_project(w, B)
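

# A minimal usage sketch for the pair above: accumulate the full squared-loss
# gradient over a dataset, then take one projected step. It assumes vectors
# are plain Python lists (as the victor_utils call sites suggest) and that
# the dataset is given as parallel lists xs, ys; the function name and the
# averaging step are illustrative, not part of the original module.
def example_l2_epoch(w, xs, ys, stepsize, B):
    cur_grad = [0.0] * len(w)
    for x, y in zip(xs, ys):
        grad_l2(w, cur_grad, x, y)
    victor_utils.scale(cur_grad, 1.0 / len(xs))  # average into a full gradient
    step_l2_project(w, cur_grad, stepsize, 0.0, B)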


def grad_svm(w, cur_grad, x, y):
    """
    Accumulates the hinge-loss (SVM) subgradient of one example into
    cur_grad. An example contributes only when it violates the margin,
    i.e. when y * (w . x) < 1.

    @type w: vector
    @param w: model vector
    @type cur_grad: vector
    @param cur_grad: current gradient accumulator
    @type x: vector
    @param x: feature vector of the example
    @type y: number
    @param y: label of the example
    """
    wx = victor_utils.dot(w, x)
    if y * wx < 1:
        # subgradient of max(0, 1 - y*wx) is -y*x for margin violators,
        # matching the -stepsize step taken in step_svm below
        victor_utils.scale_and_add_i(cur_grad, x, -y)


def step_svm(w, cur_grad, stepsize, mu, B):
    """
    Takes a gradient step on w, then applies the implicit (proximal) update
    for the l2 regularizer (mu / 2) * ||w||^2, shrinking w by the factor
    1 / (1 + stepsize * mu).

    @type w: vector
    @param w: model vector
    @type cur_grad: vector
    @param cur_grad: gradient
    @type stepsize: double
    @param stepsize: step size
    @type mu: double
    @param mu: l2 regularization parameter
    @type B: double
    @param B: unused by this step
    """
    victor_utils.scale_and_add_i(w, cur_grad, -stepsize)
    victor_utils.scale(w, 1.0 / (1.0 + stepsize * mu))
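

# A matching sketch for the SVM pair: accumulate the hinge subgradient over
# all examples, then take one step with the implicit l2 shrinkage. Assumes
# list-backed vectors and labels y in {-1, +1}; the function name and the
# averaging step are illustrative, not part of the original module.
def example_svm_epoch(w, xs, ys, stepsize, mu):
    cur_grad = [0.0] * len(w)
    for x, y in zip(xs, ys):
        grad_svm(w, cur_grad, x, y)
    victor_utils.scale(cur_grad, 1.0 / len(xs))
    step_svm(w, cur_grad, stepsize, mu, 0.0)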


def grad_logit_sparse(w, cur_grad, x_indexes, x_values, y):
    """
    Accumulates the logistic-loss gradient of one sparsely represented
    example into cur_grad.

    @type w: vector
    @param w: model vector
    @type cur_grad: vector
    @param cur_grad: current gradient accumulator
    @type x_indexes: vector
    @param x_indexes: indexes of the nonzero features of the example
    @type x_values: vector
    @param x_values: values of the nonzero features of the example
    @type y: number
    @param y: label of the example
    """
    wx = victor_utils.dot_dss(w, x_indexes, x_values)
    # gradient of log(1 + exp(-y*wx)) is -y * sigma(-y*wx) * x,
    # with sigma the logistic sigmoid
    sig = victor_utils.sigma(-wx * y)
    victor_utils.scale_and_add_dss(cur_grad, x_indexes, x_values, -y * sig)
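

# For reference, dot_dss appears to compute a dense-sparse dot product
# (inferred from its call sites in this module); a pure-Python sketch of the
# assumed semantics, not the actual victor_utils implementation:
def _dot_dss_sketch(w, x_indexes, x_values):
    # sum of w[i] * v over the nonzero coordinates (i, v) of the example
    return sum(w[i] * v for i, v in zip(x_indexes, x_values))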


def step_l1_shrink(w, cur_grad, stepsize, mu, B):
    """
    Takes a gradient step on w, then applies the l1 shrinkage (soft
    thresholding) proximal operator with threshold stepsize * mu.

    @type w: vector
    @param w: model vector
    @type cur_grad: vector
    @param cur_grad: gradient
    @type stepsize: double
    @param stepsize: step size
    @type mu: double
    @param mu: l1 regularization parameter
    @type B: double
    @param B: unused by this step
    """
    victor_utils.scale_and_add_di(w, cur_grad, -stepsize)
    victor_utils.l1_shrink_mask(w, mu * stepsize)
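

# A sketch of one full proximal-gradient (ISTA) iteration for l1-regularized
# logistic regression, combining grad_logit_sparse and step_l1_shrink over a
# dataset given as parallel lists of index vectors, value vectors, and
# labels. Illustrative names, not part of the original module.
def example_logit_l1_epoch(w, xs_indexes, xs_values, ys, stepsize, mu):
    cur_grad = [0.0] * len(w)
    for x_idx, x_val, y in zip(xs_indexes, xs_values, ys):
        grad_logit_sparse(w, cur_grad, x_idx, x_val, y)
    victor_utils.scale(cur_grad, 1.0 / len(ys))
    step_l1_shrink(w, cur_grad, stepsize, mu, 0.0)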


def loss_logit(w, x_indexes, x_values, y):
    """
    Returns the logistic loss of the given model on one sparsely represented
    example.

    @type w: vector
    @param w: model vector
    @type x_indexes: vector
    @param x_indexes: indexes of the nonzero features of the example
    @type x_values: vector
    @param x_values: values of the nonzero features of the example
    @type y: number
    @param y: label of the example
    @rtype: double
    @return: logistic loss of the given model on the example
    """
    wx = victor_utils.dot_dss(w, x_indexes, x_values)
    return math.log(1 + math.exp(-y * wx))
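

# A sketch of evaluating the mean logistic loss over a sparse dataset with
# loss_logit; the function name and dataset layout are illustrative, not
# part of the original module.
def example_mean_logit_loss(w, xs_indexes, xs_values, ys):
    total = 0.0
    for x_idx, x_val, y in zip(xs_indexes, xs_values, ys):
        total += loss_logit(w, x_idx, x_val, y)
    return total / len(ys)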