1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
use volatile_register::RO;
#[cfg(any(armv7m, target_arch = "x86_64"))]
use volatile_register::RW;
#[cfg(any(armv7m, target_arch = "x86_64"))]
use peripheral::CPUID;
/// CPUID register block.
///
/// NOTE: `#[repr(C)]` layout is ABI-critical — field order, widths, and the
/// reserved padding fields mirror the memory-mapped register offsets and must
/// not be reordered.
#[repr(C)]
pub struct RegisterBlock {
    /// CPUID base register
    pub base: RO<u32>,
    // padding: 15 words between `base` and `pfr`
    reserved0: [u32; 15],
    /// Processor Feature registers
    pub pfr: [RO<u32>; 2],
    /// Debug Feature register
    pub dfr: RO<u32>,
    /// Auxiliary Feature register
    pub afr: RO<u32>,
    /// Memory Model Feature registers
    pub mmfr: [RO<u32>; 4],
    /// Instruction Set Attribute registers
    pub isar: [RO<u32>; 5],
    // padding: 1 word between `isar` and the cache ID registers
    reserved1: u32,
    /// Cache Level ID register (ARMv7-M only)
    #[cfg(any(armv7m, target_arch = "x86_64"))]
    pub clidr: RO<u32>,
    /// Cache Type register (ARMv7-M only)
    #[cfg(any(armv7m, target_arch = "x86_64"))]
    pub ctr: RO<u32>,
    /// Cache Size ID register (ARMv7-M only); contents depend on `csselr`
    #[cfg(any(armv7m, target_arch = "x86_64"))]
    pub ccsidr: RO<u32>,
    /// Cache Size Selection register (ARMv7-M only); read/write — selects
    /// which cache `ccsidr` describes
    #[cfg(any(armv7m, target_arch = "x86_64"))]
    pub csselr: RW<u32>,
}
/// Type of cache to select on CSSELR writes.
///
/// The discriminant values are the encodings of the CSSELR `InD` field
/// (bit 0), as used by [`CPUID::select_cache`].
// Fix: public enum was missing the standard derives (`Debug`, `Clone`,
// `Copy`, `PartialEq`, `Eq`). Adding them is backward-compatible; `Copy`
// in particular lets callers reuse a value passed by value into
// `cache_num_sets_ways` without an explicit clone.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg(any(armv7m, target_arch = "x86_64"))]
pub enum CsselrCacheType {
    /// Select the data cache, or a unified cache
    DataOrUnified = 0,
    /// Select the instruction cache
    Instruction = 1,
}
#[cfg(any(armv7m, target_arch = "x86_64"))]
impl CPUID {
    /// Selects the cache that subsequent `ccsidr` reads describe.
    ///
    /// * `level` — cache level minus one (0 = L1, 1 = L2, …); masked to 3 bits
    /// * `ind` — instruction cache vs. data/unified cache
    pub fn select_cache(&mut self, level: u8, ind: CsselrCacheType) {
        // CSSELR field layout (per the constants below):
        // bit 0 = InD (instruction / not-data), bits 3:1 = cache level.
        const CSSELR_IND_POS: u32 = 0;
        const CSSELR_IND_MASK: u32 = 1 << CSSELR_IND_POS;
        const CSSELR_LEVEL_POS: u32 = 1;
        const CSSELR_LEVEL_MASK: u32 = 0x7 << CSSELR_LEVEL_POS;

        let level_bits = ((level as u32) << CSSELR_LEVEL_POS) & CSSELR_LEVEL_MASK;
        let ind_bits = ((ind as u32) << CSSELR_IND_POS) & CSSELR_IND_MASK;

        // SAFETY: CSSELR is a read/write selection register; writing it only
        // changes which cache the subsequent CCSIDR read reflects.
        unsafe { self.csselr.write(level_bits | ind_bits) }
    }

    /// Returns the number of sets and ways of the selected cache, as
    /// `(sets, ways)`.
    pub fn cache_num_sets_ways(&mut self, level: u8, ind: CsselrCacheType) -> (u16, u16) {
        // CCSIDR field layout (per the constants below):
        // bits 27:13 = NumSets - 1, bits 12:3 = Associativity - 1.
        const CCSIDR_NUMSETS_POS: u32 = 13;
        const CCSIDR_NUMSETS_MASK: u32 = 0x7FFF << CCSIDR_NUMSETS_POS;
        const CCSIDR_ASSOCIATIVITY_POS: u32 = 3;
        const CCSIDR_ASSOCIATIVITY_MASK: u32 = 0x3FF << CCSIDR_ASSOCIATIVITY_POS;

        self.select_cache(level, ind);
        // Barrier so the CSSELR write takes effect before CCSIDR is read.
        ::asm::dsb();

        let ccsidr = self.ccsidr.read();
        let sets = 1 + ((ccsidr & CCSIDR_NUMSETS_MASK) >> CCSIDR_NUMSETS_POS);
        let ways = 1 + ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> CCSIDR_ASSOCIATIVITY_POS);
        (sets as u16, ways as u16)
    }
}