hw/openrisc/cputimer.c: OpenRISC tick timer support
[qemu.git] / hw / openrisc / cputimer.c
1 /*
2 * QEMU OpenRISC timer support
3 *
4 * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
5 * Zhizhou Zhang <etouzh@gmail.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "migration/vmstate.h"
24 #include "qemu/timer.h"
25
#define TIMER_PERIOD 50 /* 50 ns period for 20 MHz timer */

/* Tick Timer global state to allow all cores to be in sync */
typedef struct OR1KTimerState {
    uint32_t ttcr;      /* Tick Timer Count Register, shared by all cores */
    uint64_t last_clk;  /* QEMU_CLOCK_VIRTUAL time of the last ttcr sync */
} OR1KTimerState;

/* Allocated lazily by cpu_openrisc_clock_init(); one instance, shared. */
static OR1KTimerState *or1k_timer;
35
/* Store @val into the tick timer counter shared by all cores. */
void cpu_openrisc_count_set(OpenRISCCPU *cpu, uint32_t val)
{
    or1k_timer->ttcr = val;
}
40
/* Return the current value of the shared tick timer counter. */
uint32_t cpu_openrisc_count_get(OpenRISCCPU *cpu)
{
    return or1k_timer->ttcr;
}
45
46 /* Add elapsed ticks to ttcr */
47 void cpu_openrisc_count_update(OpenRISCCPU *cpu)
48 {
49 uint64_t now;
50
51 if (!cpu->env.is_counting) {
52 return;
53 }
54 now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
55 or1k_timer->ttcr += (uint32_t)((now - or1k_timer->last_clk)
56 / TIMER_PERIOD);
57 or1k_timer->last_clk = now;
58 }
59
60 /* Update the next timeout time as difference between ttmr and ttcr */
61 void cpu_openrisc_timer_update(OpenRISCCPU *cpu)
62 {
63 uint32_t wait;
64 uint64_t now, next;
65
66 if (!cpu->env.is_counting) {
67 return;
68 }
69
70 cpu_openrisc_count_update(cpu);
71 now = or1k_timer->last_clk;
72
73 if ((cpu->env.ttmr & TTMR_TP) <= (or1k_timer->ttcr & TTMR_TP)) {
74 wait = TTMR_TP - (or1k_timer->ttcr & TTMR_TP) + 1;
75 wait += cpu->env.ttmr & TTMR_TP;
76 } else {
77 wait = (cpu->env.ttmr & TTMR_TP) - (or1k_timer->ttcr & TTMR_TP);
78 }
79 next = now + (uint64_t)wait * TIMER_PERIOD;
80 timer_mod(cpu->env.timer, next);
81 }
82
/* Begin counting: the flag must be set before the sync in count_update. */
void cpu_openrisc_count_start(OpenRISCCPU *cpu)
{
    cpu->env.is_counting = 1;
    cpu_openrisc_count_update(cpu);
}
88
/* Halt counting: cancel the pending expiry, fold in elapsed time, stop. */
void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
{
    timer_del(cpu->env.timer);
    /* Sync ttcr while is_counting is still set, then clear the flag. */
    cpu_openrisc_count_update(cpu);
    cpu->env.is_counting = 0;
}
95
/*
 * QEMU_CLOCK_VIRTUAL expiry callback: raise the tick timer interrupt if
 * enabled, apply the configured timer mode, and re-arm the timer.
 */
static void openrisc_timer_cb(void *opaque)
{
    OpenRISCCPU *cpu = opaque;

    /* Only interrupt when enabled in ttmr and the deadline really passed. */
    if ((cpu->env.ttmr & TTMR_IE) &&
        timer_expired(cpu->env.timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
        CPUState *cs = CPU(cpu);

        cpu->env.ttmr |= TTMR_IP;               /* latch "interrupt pending" */
        cs->interrupt_request |= CPU_INTERRUPT_TIMER;
    }

    /* TTMR_M selects what happens to the counter on a match. */
    switch (cpu->env.ttmr & TTMR_M) {
    case TIMER_NONE:
        break;
    case TIMER_INTR:
        /* Restart mode: counter wraps back to zero. */
        or1k_timer->ttcr = 0;
        break;
    case TIMER_SHOT:
        /* One-shot mode: stop counting entirely. */
        cpu_openrisc_count_stop(cpu);
        break;
    case TIMER_CONT:
        /* Continuous mode: counter keeps running past the match. */
        break;
    }

    /* Re-arm for the next match (no-op if counting was just stopped). */
    cpu_openrisc_timer_update(cpu);
    qemu_cpu_kick(CPU(cpu));
}
124
/* Migration description for the shared tick timer state. */
static const VMStateDescription vmstate_or1k_timer = {
    .name = "or1k_timer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ttcr, OR1KTimerState),
        VMSTATE_UINT64(last_clk, OR1KTimerState),
        VMSTATE_END_OF_LIST()
    }
};
135
136 void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
137 {
138 cpu->env.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &openrisc_timer_cb, cpu);
139 cpu->env.ttmr = 0x00000000;
140
141 if (or1k_timer == NULL) {
142 or1k_timer = g_new0(OR1KTimerState, 1);
143 vmstate_register(NULL, 0, &vmstate_or1k_timer, or1k_timer);
144 }
145 }