target/linux/xburst/patches-3.3/0010-cpufreq_stats-Support-runtime-changes-to-frequency-t.patch
From ca40c7542f0cd0e0dfa074bd4ccefc04b8561427 Mon Sep 17 00:00:00 2001
From: Maarten ter Huurne <maarten@treewalker.org>
Date: Tue, 2 Aug 2011 10:26:09 +0200
Subject: [PATCH 10/21] cpufreq_stats: Support runtime changes to frequency
 table.

---
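Notes:

The stats code used to assume that a CPU's frequency table stays fixed for
the lifetime of its stats table. With this patch the table may change at
runtime:

* last_index becomes a signed int so that -1 ("current frequency not in the
  table") can be stored; the update and transition paths skip accounting
  while the index is unknown.
* cpufreq_stats_create_table() is reduced to allocating the per-CPU stats
  struct and creating the sysfs group. The new cpufreq_stats_update_table()
  (re)allocates and clears time_in_state/freq_table/trans_table from the
  table reported by the policy notifier, so it can run again whenever the
  table changes.
* The policy notifier creates the stats table on the first notification and
  refreshes it on every subsequent one instead of returning -EBUSY.
* cpufreq_stats_free_sysfs() is folded into cpufreq_stats_free_table(), which
  makes the CPU_DOWN_PREPARE hook and the raised notifier priority
  unnecessary.
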
 drivers/cpufreq/cpufreq_stats.c |  161 ++++++++++++++++++++-------------------
 1 files changed, 83 insertions(+), 78 deletions(-)

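A minimal driver-side sketch of how the reworked code is meant to be
exercised. This is illustrative only: my_rebuild_freq_table() is a
hypothetical driver helper, not part of this patch or of the cpufreq core.
After a driver swaps in a new frequency table, cpufreq_update_policy()
re-evaluates the policy; the resulting CPUFREQ_NOTIFY notification reaches
cpufreq_stat_notifier_policy(), which now calls cpufreq_stats_update_table()
instead of failing with -EBUSY for an already-initialised CPU.

#include <linux/cpufreq.h>

/* Hypothetical driver helper: rebuilds the table later returned by
 * cpufreq_frequency_get_table() for this CPU. */
int my_rebuild_freq_table(unsigned int cpu);

static int my_retune_cpu(unsigned int cpu)
{
        int ret;

        ret = my_rebuild_freq_table(cpu);       /* driver-specific */
        if (ret)
                return ret;

        /*
         * Re-evaluate the policy. This ends in a CPUFREQ_NOTIFY policy
         * notification, on which cpufreq_stats_update_table() resizes and
         * clears the per-state statistics for the new table.
         */
        return cpufreq_update_policy(cpu);
}
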
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -20,6 +20,7 @@
 #include <linux/kobject.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/string.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -36,7 +37,7 @@ struct cpufreq_stats {
        unsigned long long  last_time;
        unsigned int max_state;
        unsigned int state_num;
-       unsigned int last_index;
+       int last_index;
        cputime64_t *time_in_state;
        unsigned int *freq_table;
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
@@ -59,7 +60,7 @@ static int cpufreq_stats_update(unsigned
        cur_time = get_jiffies_64();
        spin_lock(&cpufreq_stats_lock);
        stat = per_cpu(cpufreq_stats_table, cpu);
-       if (stat->time_in_state)
+       if (stat->time_in_state && stat->last_index != -1)
                stat->time_in_state[stat->last_index] +=
                        cur_time - stat->last_time;
        stat->last_time = cur_time;
@@ -81,7 +82,7 @@ static ssize_t show_time_in_state(struct
        ssize_t len = 0;
        int i;
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (!stat)
+       if (!stat || !stat->time_in_state)
                return 0;
        cpufreq_stats_update(stat->cpu);
        for (i = 0; i < stat->state_num; i++) {
@@ -99,7 +100,7 @@ static ssize_t show_trans_table(struct c
        int i, j;
 
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
-       if (!stat)
+       if (!stat || !stat->trans_table)
                return 0;
        cpufreq_stats_update(stat->cpu);
        len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
@@ -158,63 +159,35 @@ static struct attribute_group stats_attr
 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 {
        int index;
-       for (index = 0; index < stat->max_state; index++)
-               if (stat->freq_table[index] == freq)
-                       return index;
+       if (stat->freq_table)
+               for (index = 0; index < stat->max_state; index++)
+                       if (stat->freq_table[index] == freq)
+                               return index;
        return -1;
 }
 
-/* should be called late in the CPU removal sequence so that the stats
- * memory is still available in case someone tries to use it.
- */
 static void cpufreq_stats_free_table(unsigned int cpu)
 {
        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+       if (policy && policy->cpu == cpu)
+               sysfs_remove_group(&policy->kobj, &stats_attr_group);
        if (stat) {
                kfree(stat->time_in_state);
                kfree(stat);
        }
        per_cpu(cpufreq_stats_table, cpu) = NULL;
-}
-
-/* must be called early in the CPU removal sequence (before
- * cpufreq_remove_dev) so that policy is still valid.
- */
-static void cpufreq_stats_free_sysfs(unsigned int cpu)
-{
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-       if (policy && policy->cpu == cpu)
-               sysfs_remove_group(&policy->kobj, &stats_attr_group);
        if (policy)
                cpufreq_cpu_put(policy);
 }
 
-static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+static int cpufreq_stats_update_table(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table)
 {
-       unsigned int i, j, count = 0, ret = 0;
-       struct cpufreq_stats *stat;
-       struct cpufreq_policy *data;
+       unsigned int i, j, count = 0;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
-       if (per_cpu(cpufreq_stats_table, cpu))
-               return -EBUSY;
-       stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
-       if ((stat) == NULL)
-               return -ENOMEM;
-
-       data = cpufreq_cpu_get(cpu);
-       if (data == NULL) {
-               ret = -EINVAL;
-               goto error_get_fail;
-       }
-
-       ret = sysfs_create_group(&data->kobj, &stats_attr_group);
-       if (ret)
-               goto error_out;
-
-       stat->cpu = cpu;
-       per_cpu(cpufreq_stats_table, cpu) = stat;
+       struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
 
        for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                unsigned int freq = table[i].frequency;
@@ -223,40 +196,73 @@ static int cpufreq_stats_create_table(st
                count++;
        }
 
+       if (stat->max_state != count) {
+               stat->max_state = count;
+               kfree(stat->time_in_state);
+               stat->time_in_state = NULL;
+       }
        alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
-
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
        alloc_size += count * count * sizeof(int);
 #endif
-       stat->max_state = count;
-       stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
-       if (!stat->time_in_state) {
-               ret = -ENOMEM;
-               goto error_out;
-       }
-       stat->freq_table = (unsigned int *)(stat->time_in_state + count);
-
+       if (stat->time_in_state) {
+               memset(stat->time_in_state, 0, alloc_size);
+       } else {
+               stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
+               if (!stat->time_in_state)
+                       return -ENOMEM;
+               stat->freq_table = (unsigned int *)(
+                               stat->time_in_state + count);
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-       stat->trans_table = stat->freq_table + count;
+               stat->trans_table = stat->freq_table + count;
 #endif
+       }
+
        j = 0;
-       for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-               unsigned int freq = table[i].frequency;
-               if (freq == CPUFREQ_ENTRY_INVALID)
-                       continue;
-               if (freq_table_get_index(stat, freq) == -1)
-                       stat->freq_table[j++] = freq;
+       if (stat->freq_table) {
+               for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+                       unsigned int freq = table[i].frequency;
+                       if (freq == CPUFREQ_ENTRY_INVALID)
+                               continue;
+                       if (freq_table_get_index(stat, freq) == -1)
+                               stat->freq_table[j++] = freq;
+               }
        }
        stat->state_num = j;
        spin_lock(&cpufreq_stats_lock);
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
+       return 0;
+}
+
+static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
+               struct cpufreq_frequency_table *table)
+{
+       unsigned int ret = 0;
+       struct cpufreq_stats *stat;
+       struct cpufreq_policy *data;
+       unsigned int cpu = policy->cpu;
+
+       stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+       if ((stat) == NULL)
+               return -ENOMEM;
+
+       data = cpufreq_cpu_get(cpu);
+       if (data == NULL) {
+               ret = -EINVAL;
+               goto error_out;
+       }
+       ret = sysfs_create_group(&data->kobj, &stats_attr_group);
        cpufreq_cpu_put(data);
+       if (ret)
+               goto error_out;
+
+       stat->cpu = cpu;
+       per_cpu(cpufreq_stats_table, cpu) = stat;
+
        return 0;
 error_out:
-       cpufreq_cpu_put(data);
-error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
        return ret;
@@ -274,10 +280,12 @@ static int cpufreq_stat_notifier_policy(
        table = cpufreq_frequency_get_table(cpu);
        if (!table)
                return 0;
-       ret = cpufreq_stats_create_table(policy, table);
-       if (ret)
-               return ret;
-       return 0;
+       if (!per_cpu(cpufreq_stats_table, cpu)) {
+               ret = cpufreq_stats_create_table(policy, table);
+               if (ret)
+                       return ret;
+       }
+       return cpufreq_stats_update_table(policy, table);
 }
 
 static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
@@ -297,21 +305,23 @@ static int cpufreq_stat_notifier_trans(s
        old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);
 
-       /* We can't do stat->time_in_state[-1]= .. */
-       if (old_index == -1 || new_index == -1)
-               return 0;
-
        cpufreq_stats_update(freq->cpu);
-
        if (old_index == new_index)
                return 0;
 
+       if (new_index == -1)
+               return 0;
+
        spin_lock(&cpufreq_stats_lock);
        stat->last_index = new_index;
+       if (old_index != -1) {
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-       stat->trans_table[old_index * stat->max_state + new_index]++;
+               if (stat->trans_table)
+                       stat->trans_table[old_index * stat->max_state +
+                                         new_index]++;
 #endif
-       stat->total_trans++;
+               stat->total_trans++;
+       }
        spin_unlock(&cpufreq_stats_lock);
        return 0;
 }
@@ -327,9 +337,6 @@ static int __cpuinit cpufreq_stat_cpu_ca
        case CPU_ONLINE_FROZEN:
                cpufreq_update_policy(cpu);
                break;
-       case CPU_DOWN_PREPARE:
-               cpufreq_stats_free_sysfs(cpu);
-               break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpufreq_stats_free_table(cpu);
@@ -338,10 +345,9 @@ static int __cpuinit cpufreq_stat_cpu_ca
        return NOTIFY_OK;
 }
 
-/* priority=1 so this will get called before cpufreq_remove_dev */
-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
+static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
+{
        .notifier_call = cpufreq_stat_cpu_callback,
-       .priority = 1,
 };
 
 static struct notifier_block notifier_policy_block = {
@@ -388,7 +394,6 @@ static void __exit cpufreq_stats_exit(vo
        unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
        for_each_online_cpu(cpu) {
                cpufreq_stats_free_table(cpu);
-               cpufreq_stats_free_sysfs(cpu);
        }
 }