1 From 151d7e91baaa4016ba687b80e8f7ccead62d6c72 Mon Sep 17 00:00:00 2001
2 From: Stephen Boyd <sboyd@codeaurora.org>
3 Date: Tue, 25 Mar 2014 13:37:55 -0700
4 Subject: [PATCH 165/182] clk: qcom: Add support for muxes, dividers, and mux
7 The Krait CPU clocks are made up of muxes and dividers with a
8 handful of sources. Add a set of clk_ops that allow us to
9 configure these clocks so we can support CPU frequency scaling on
12 Based on code originally written by Saravana Kannan.
14 Cc: Saravana Kannan <skannan@codeaurora.org>
15 Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
17 drivers/clk/qcom/Makefile | 1 +
18 drivers/clk/qcom/clk-generic.c | 405 +++++++++++++++++++++++++++++++++++
19 include/linux/clk/msm-clk-generic.h | 208 ++++++++++++++++++
20 3 files changed, 614 insertions(+)
21 create mode 100644 drivers/clk/qcom/clk-generic.c
22 create mode 100644 include/linux/clk/msm-clk-generic.h
24 --- a/drivers/clk/qcom/Makefile
25 +++ b/drivers/clk/qcom/Makefile
26 @@ -6,6 +6,7 @@ clk-qcom-y += clk-pll.o
27 clk-qcom-y += clk-rcg.o
28 clk-qcom-y += clk-rcg2.o
29 clk-qcom-y += clk-branch.o
30 +clk-qcom-y += clk-generic.o
33 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
35 +++ b/drivers/clk/qcom/clk-generic.c
38 + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
40 + * This software is licensed under the terms of the GNU General Public
41 + * License version 2, as published by the Free Software Foundation, and
42 + * may be copied, distributed, and modified under those terms.
44 + * This program is distributed in the hope that it will be useful,
45 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
46 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
47 + * GNU General Public License for more details.
50 +#include <linux/kernel.h>
51 +#include <linux/export.h>
52 +#include <linux/bug.h>
53 +#include <linux/err.h>
54 +#include <linux/clk-provider.h>
55 +#include <linux/clk/msm-clk-generic.h>
58 +/* ==================== Mux clock ==================== */
60 +static int mux_set_parent(struct clk_hw *hw, u8 sel)
62 + struct mux_clk *mux = to_mux_clk(hw);
64 + if (mux->parent_map)
65 + sel = mux->parent_map[sel];
67 + return mux->ops->set_mux_sel(mux, sel);
70 +static u8 mux_get_parent(struct clk_hw *hw)
72 + struct mux_clk *mux = to_mux_clk(hw);
73 + int num_parents = __clk_get_num_parents(hw->clk);
77 + sel = mux->ops->get_mux_sel(mux);
78 + if (mux->parent_map) {
79 + for (i = 0; i < num_parents; i++)
80 + if (sel == mux->parent_map[i])
82 + WARN(1, "Can't find parent\n");
89 +static int mux_enable(struct clk_hw *hw)
91 + struct mux_clk *mux = to_mux_clk(hw);
92 + if (mux->ops->enable)
93 + return mux->ops->enable(mux);
97 +static void mux_disable(struct clk_hw *hw)
99 + struct mux_clk *mux = to_mux_clk(hw);
100 + if (mux->ops->disable)
101 + return mux->ops->disable(mux);
104 +static struct clk *mux_get_safe_parent(struct clk_hw *hw)
107 + struct mux_clk *mux = to_mux_clk(hw);
108 + int num_parents = __clk_get_num_parents(hw->clk);
110 + if (!mux->has_safe_parent)
114 + if (mux->parent_map)
115 + for (i = 0; i < num_parents; i++)
116 + if (mux->safe_sel == mux->parent_map[i])
119 + return clk_get_parent_by_index(hw->clk, i);
122 +const struct clk_ops clk_ops_gen_mux = {
123 + .enable = mux_enable,
124 + .disable = mux_disable,
125 + .set_parent = mux_set_parent,
126 + .get_parent = mux_get_parent,
127 + .determine_rate = __clk_mux_determine_rate,
128 + .get_safe_parent = mux_get_safe_parent,
130 +EXPORT_SYMBOL_GPL(clk_ops_gen_mux);
132 +/* ==================== Divider clock ==================== */
134 +static long __div_round_rate(struct div_data *data, unsigned long rate,
135 + struct clk *parent, unsigned int *best_div, unsigned long *best_prate,
138 + unsigned int div, min_div, max_div, _best_div = 1;
139 + unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
140 + unsigned int numer;
142 + rate = max(rate, 1UL);
144 + min_div = max(data->min_div, 1U);
145 + max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));
148 + * div values are doubled for half dividers.
149 + * Adjust for that by picking a numer of 2.
151 + numer = data->is_half_divider ? 2 : 1;
154 + prate = *best_prate * numer;
155 + div = DIV_ROUND_UP(prate, rate);
156 + div = clamp(1U, div, max_div);
159 + return mult_frac(*best_prate, numer, div);
162 + for (div = min_div; div <= max_div; div++) {
163 + req_prate = mult_frac(rate, div, numer);
164 + prate = __clk_round_rate(parent, req_prate);
165 + if (IS_ERR_VALUE(prate))
168 + actual_rate = mult_frac(prate, numer, div);
169 + if (is_better_rate(rate, rrate, actual_rate)) {
170 + rrate = actual_rate;
172 + _best_prate = prate;
176 + * Trying higher dividers is only going to ask the parent for
177 + * a higher rate. If it can't even output a rate higher than
178 + * the one we request for this divider, the parent is not
179 + * going to be able to output an even higher rate required
180 + * for a higher divider. So, stop trying higher dividers.
182 + if (actual_rate < rate)
192 + *best_div = _best_div;
194 + *best_prate = _best_prate;
199 +static long div_round_rate(struct clk_hw *hw, unsigned long rate,
200 + unsigned long *parent_rate)
202 + struct div_clk *d = to_div_clk(hw);
203 + bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
205 + return __div_round_rate(&d->data, rate, __clk_get_parent(hw->clk),
206 + NULL, parent_rate, set_parent);
209 +static int div_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
212 + struct div_clk *d = to_div_clk(hw);
214 + struct div_data *data = &d->data;
216 + div = parent_rate / rate;
217 + if (div != data->div)
218 + rc = d->ops->set_div(d, div);
224 +static int div_enable(struct clk_hw *hw)
226 + struct div_clk *d = to_div_clk(hw);
227 + if (d->ops && d->ops->enable)
228 + return d->ops->enable(d);
232 +static void div_disable(struct clk_hw *hw)
234 + struct div_clk *d = to_div_clk(hw);
235 + if (d->ops && d->ops->disable)
236 + return d->ops->disable(d);
239 +static unsigned long div_recalc_rate(struct clk_hw *hw, unsigned long prate)
241 + struct div_clk *d = to_div_clk(hw);
242 + unsigned int div = d->data.div;
244 + if (d->ops && d->ops->get_div)
245 + div = max(d->ops->get_div(d), 1);
246 + div = max(div, 1U);
248 + if (!d->ops || !d->ops->set_div)
249 + d->data.min_div = d->data.max_div = div;
252 + return prate / div;
255 +const struct clk_ops clk_ops_div = {
256 + .enable = div_enable,
257 + .disable = div_disable,
258 + .round_rate = div_round_rate,
259 + .set_rate = div_set_rate,
260 + .recalc_rate = div_recalc_rate,
262 +EXPORT_SYMBOL_GPL(clk_ops_div);
264 +/* ==================== Mux_div clock ==================== */
266 +static int mux_div_clk_enable(struct clk_hw *hw)
268 + struct mux_div_clk *md = to_mux_div_clk(hw);
270 + if (md->ops->enable)
271 + return md->ops->enable(md);
275 +static void mux_div_clk_disable(struct clk_hw *hw)
277 + struct mux_div_clk *md = to_mux_div_clk(hw);
279 + if (md->ops->disable)
280 + return md->ops->disable(md);
283 +static long __mux_div_round_rate(struct clk_hw *hw, unsigned long rate,
284 + struct clk **best_parent, int *best_div, unsigned long *best_prate)
286 + struct mux_div_clk *md = to_mux_div_clk(hw);
288 + unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
289 + struct clk *_best_parent = 0;
290 + int num_parents = __clk_get_num_parents(hw->clk);
291 + bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
293 + for (i = 0; i < num_parents; i++) {
295 + unsigned long prate;
296 + struct clk *p = clk_get_parent_by_index(hw->clk, i);
298 + rrate = __div_round_rate(&md->data, rate, p, &div, &prate,
301 + if (is_better_rate(rate, best, rrate)) {
304 + _best_prate = prate;
313 + *best_div = _best_div;
315 + *best_prate = _best_prate;
317 + *best_parent = _best_parent;
324 +static long mux_div_clk_round_rate(struct clk_hw *hw, unsigned long rate,
325 + unsigned long *parent_rate)
327 + return __mux_div_round_rate(hw, rate, NULL, NULL, parent_rate);
330 +/* requires enable lock to be held */
331 +static int __set_src_div(struct mux_div_clk *md, u8 src_sel, u32 div)
335 + rc = md->ops->set_src_div(md, src_sel, div);
337 + md->data.div = div;
338 + md->src_sel = src_sel;
344 +/* Must be called after handoff to ensure parent clock rates are initialized */
345 +static int safe_parent_init_once(struct clk_hw *hw)
347 + unsigned long rrate;
349 + struct clk *best_parent;
350 + struct mux_div_clk *md = to_mux_div_clk(hw);
352 + if (IS_ERR(md->safe_parent))
354 + if (!md->safe_freq || md->safe_parent)
357 + rrate = __mux_div_round_rate(hw, md->safe_freq, &best_parent,
360 + if (rrate == md->safe_freq) {
361 + md->safe_div = best_div;
362 + md->safe_parent = best_parent;
364 + md->safe_parent = ERR_PTR(-EINVAL);
371 +__mux_div_clk_set_rate_and_parent(struct clk_hw *hw, u8 index, u32 div)
373 + struct mux_div_clk *md = to_mux_div_clk(hw);
376 + rc = safe_parent_init_once(hw);
380 + return __set_src_div(md, index, div);
383 +static int mux_div_clk_set_rate_and_parent(struct clk_hw *hw,
384 + unsigned long rate, unsigned long parent_rate, u8 index)
386 + return __mux_div_clk_set_rate_and_parent(hw, index, parent_rate / rate);
389 +static int mux_div_clk_set_rate(struct clk_hw *hw,
390 + unsigned long rate, unsigned long parent_rate)
392 + struct mux_div_clk *md = to_mux_div_clk(hw);
393 + return __mux_div_clk_set_rate_and_parent(hw, md->src_sel,
394 + parent_rate / rate);
397 +static int mux_div_clk_set_parent(struct clk_hw *hw, u8 index)
399 + struct mux_div_clk *md = to_mux_div_clk(hw);
400 + return __mux_div_clk_set_rate_and_parent(hw, md->parent_map[index],
404 +static u8 mux_div_clk_get_parent(struct clk_hw *hw)
406 + struct mux_div_clk *md = to_mux_div_clk(hw);
407 + int num_parents = __clk_get_num_parents(hw->clk);
410 + md->ops->get_src_div(md, &sel, &div);
413 + for (i = 0; i < num_parents; i++)
414 + if (sel == md->parent_map[i])
416 + WARN(1, "Can't find parent\n");
420 +static unsigned long
421 +mux_div_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
423 + struct mux_div_clk *md = to_mux_div_clk(hw);
426 + md->ops->get_src_div(md, &sel, &div);
428 + return prate / div;
431 +const struct clk_ops clk_ops_mux_div_clk = {
432 + .enable = mux_div_clk_enable,
433 + .disable = mux_div_clk_disable,
434 + .set_rate_and_parent = mux_div_clk_set_rate_and_parent,
435 + .set_rate = mux_div_clk_set_rate,
436 + .set_parent = mux_div_clk_set_parent,
437 + .round_rate = mux_div_clk_round_rate,
438 + .get_parent = mux_div_clk_get_parent,
439 + .recalc_rate = mux_div_clk_recalc_rate,
441 +EXPORT_SYMBOL_GPL(clk_ops_mux_div_clk);
443 +++ b/include/linux/clk/msm-clk-generic.h
446 + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
448 + * This software is licensed under the terms of the GNU General Public
449 + * License version 2, as published by the Free Software Foundation, and
450 + * may be copied, distributed, and modified under those terms.
452 + * This program is distributed in the hope that it will be useful,
453 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
454 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
455 + * GNU General Public License for more details.
458 +#ifndef __QCOM_CLK_GENERIC_H__
459 +#define __QCOM_CLK_GENERIC_H__
461 +#include <linux/err.h>
462 +#include <linux/clk-provider.h>
464 +static inline bool is_better_rate(unsigned long req, unsigned long best,
467 + if (IS_ERR_VALUE(new))
470 + return (req <= new && new < best) || (best < req && best < new);
473 +/* ==================== Mux clock ==================== */
477 +struct clk_mux_ops {
478 + int (*set_mux_sel)(struct mux_clk *clk, int sel);
479 + int (*get_mux_sel)(struct mux_clk *clk);
482 + bool (*is_enabled)(struct mux_clk *clk);
483 + int (*enable)(struct mux_clk *clk);
484 + void (*disable)(struct mux_clk *clk);
488 + /* Parents in decreasing order of preference for obtaining rates. */
490 + bool has_safe_parent;
492 + const struct clk_mux_ops *ops;
494 + /* Fields not used by helper function. */
495 + void __iomem *base;
507 +static inline struct mux_clk *to_mux_clk(struct clk_hw *hw)
509 + return container_of(hw, struct mux_clk, hw);
512 +extern const struct clk_ops clk_ops_gen_mux;
514 +/* ==================== Divider clock ==================== */
518 +struct clk_div_ops {
519 + int (*set_div)(struct div_clk *clk, int div);
520 + int (*get_div)(struct div_clk *clk);
521 + bool (*is_enabled)(struct div_clk *clk);
522 + int (*enable)(struct div_clk *clk);
523 + void (*disable)(struct div_clk *clk);
528 + unsigned int min_div;
529 + unsigned int max_div;
531 + * Indicate whether this divider clock supports half-integer dividers.
532 + * If it does, the min_div and max_div values have been doubled. It means
535 + bool is_half_divider;
539 + struct div_data data;
542 + const struct clk_div_ops *ops;
544 + /* Fields not used by helper function. */
545 + void __iomem *base;
554 +static inline struct div_clk *to_div_clk(struct clk_hw *hw)
556 + return container_of(hw, struct div_clk, hw);
559 +extern const struct clk_ops clk_ops_div;
561 +#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
562 +static struct div_clk clk_name = { \
568 + .hw.init = &(struct clk_init_data){ \
569 + .parent_names = (const char *[]){ _parent }, \
570 + .num_parents = 1, \
571 + .name = #clk_name, \
572 + .ops = &clk_ops_div, \
573 + .flags = CLK_SET_RATE_PARENT, \
577 +/* ==================== Mux Div clock ==================== */
582 + * struct mux_div_ops
583 + * the enable and disable ops are optional.
586 +struct mux_div_ops {
587 + int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
588 + void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
589 + int (*enable)(struct mux_div_clk *);
590 + void (*disable)(struct mux_div_clk *);
591 + bool (*is_enabled)(struct mux_div_clk *);
595 + * struct mux_div_clk - combined mux/divider clock
597 + parameters needed by ops
599 + when switching rates from A to B, the mux div clock will
600 + instead switch from A -> safe_freq -> B. This allows the
601 + mux_div clock to change rates while enabled, even if this
602 + behavior is not supported by the parent clocks.
604 + If changing the rate of parent A also causes the rate of
605 + parent B to change, then safe_freq must be defined.
607 + safe_freq is expected to have a source clock which is always
608 + on and runs at only one rate.
610 + list of parents and mux indices
612 + function pointers for hw specific operations
614 + the mux index which will be used if the clock is enabled.
617 +struct mux_div_clk {
618 + /* Required parameters */
619 + const struct mux_div_ops *ops;
620 + struct div_data data;
628 + /* Optional parameters */
630 + void __iomem *base;
641 + struct clk *safe_parent;
642 + unsigned long safe_freq;
645 +static inline struct mux_div_clk *to_mux_div_clk(struct clk_hw *hw)
647 + return container_of(hw, struct mux_div_clk, hw);
650 +extern const struct clk_ops clk_ops_mux_div_clk;