1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
|
// Message/state vocabulary shared by caches and DRAM.
mtype {
I, M, // valid cache line states: invalid, modified
U, X, // evict/fill/snoop responses that are not valid cache line states: unknown (from DRAM only), fail (you have to try again)
ItoM, MtoI, // commands from peers: ItoM = fill/snoop request, MtoI = eviction/writeback
Busy, // used as both cache line state and evil buffer command to denote activity on behalf of other addresses
}
proctype dram(chan recvq; chan sendq)
{
	// Memory endpoint: forever answers each coherence command with a fixed
	// reply. An eviction/writeback (MtoI) is acknowledged with I; a fill
	// request (ItoM) returns U, marking the data as coming from DRAM
	// rather than from a peer cache.
	do
	:: atomic { recvq?MtoI -> sendq!I }
	:: atomic { recvq?ItoM -> sendq!U }
	od
}
chan qs[6] = [1] of { mtype };
proctype cache(int recvqbase; int sendqbase; int npeers)
{
// One cache agent. Peer i is reached through the channel pair RQ(i)/SQ(i),
// carved out of the global qs[] array by recvqbase/sendqbase. The "evil
// buffer" (eb* variables) holds at most one in-flight transaction.
#define ALLPEERS ((1 << npeers) - 1)
#define ERESET() atomic { ebcmd = I; ebstate = I; enotsent = 0; enotrcvd = 0; ereverse = 0; esource = -1; }
#define RQ(i) qs[recvqbase+i]
#define SQ(i) qs[sendqbase+i]
mtype cstate = I; // cache line state: I, M, or Busy
mtype ebcmd = I; // evil buffer command: I (idle), ItoM, MtoI, or Busy
int ebstate = I; // data state held by the evil buffer: I, M, or U
int enotsent = 0; // bitmask of snoops remaining to send
int enotrcvd = 0; // bitmask of replies remaining to receive
int ereverse = 0; // count of reverse progress (X replies that forced a re-send)
int esource = -1; // peer that originated the current command (-1 is originated locally)
int i = 0; // current peer
do
// Receive fill request from peer
:: atomic { RQ(i)?ItoM -> if
:: cstate == M -> cstate = I -> SQ(i)!M // we own the dirty line: hand it over
:: cstate == I && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS & ~(1 << i) -> esource = i // idle: snoop all other peers on the requester's behalf
:: cstate == I && ebcmd == MtoI && enotsent != 0 -> ERESET() -> SQ(i)!M // eviction queued but not yet sent: cancel it and grant M to the requester instead
:: else -> SQ(i)!X // otherwise occupied: tell requester to retry
fi }
// Receive eviction request from peer
:: atomic { RQ(i)?MtoI -> if
:: cstate == M || ebcmd == MtoI -> assert(false) // two holders of dirty data is a protocol violation
:: cstate == I && ebcmd == I -> cstate = M -> SQ(i)!I // absorb the dirty line into our cache
// :: cstate == I && ebcmd == ItoM -> ebstate = M -> enotsent = 0 -> SQ(i)!I // Missing logic: cut-through send to esource, clear out eb when enotrcvd is already 0
:: cstate == I && ebcmd == ItoM -> SQ(i)!X
:: cstate == I && ebcmd == Busy -> cstate = M -> SQ(i)!I
// No room in the cache: accept the data and queue an eviction toward the
// last peer (downstream/DRAM). The assert holds because the last peer
// (DRAM) never sends MtoI — see proctype dram.
:: cstate == Busy && ebcmd == I -> assert(i != npeers-1) -> ebcmd = MtoI -> enotsent = 1 << (npeers-1) -> SQ(i)!I
:: cstate == Busy && ebcmd != I -> SQ(i)!X
fi }
// Receive fill responses from peers
:: atomic { RQ(i)?I -> assert((enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> if
:: ebcmd == ItoM -> enotrcvd = enotrcvd & ~(1 << i) // peer had nothing; keep waiting on the rest
:: ebcmd == MtoI -> ERESET() // eviction acknowledged: transaction complete
:: else -> assert(false)
fi }
:: atomic { RQ(i)?M -> assert(ebcmd == ItoM) -> assert((enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> ebstate = M -> if
:: esource != -1 -> SQ(esource)!M // cut-through: forward dirty data to the original requester immediately
:: else /* ideally put it into the cache right away */
fi }
// DRAM data (U) only fills an empty buffer; it never downgrades an M already held.
:: atomic { RQ(i)?U -> assert(ebcmd == ItoM) -> assert((enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> if :: ebstate == I -> ebstate = U :: else fi }
// Retry (X): move the peer's bit from "awaiting reply" back to "needs sending".
:: atomic { RQ(i)?X -> assert((enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> enotsent = enotsent | (1 << i) -> if
:: ebcmd == ItoM -> ereverse = ereverse + 1 -> if
// Deadlock avoidance: after enough bounces, abandon a locally-originated
// fill entirely, or report failure (X) back to the requesting peer.
:: atomic { ereverse > npeers && esource == -1 && enotsent == ALLPEERS -> ERESET() }
// BUG FIX: '==' binds tighter than '&' (C precedence), so the original
// unparenthesized condition compared enotsent against ALLPEERS and then
// masked the boolean result; the mask must be computed first, mirroring
// the esource == -1 branch above ("all peers minus the source").
:: atomic { ereverse > npeers - 1 && esource != -1 && enotsent == (ALLPEERS & ~(1 << esource)) -> SQ(esource)!X -> ERESET() }
:: else
fi
:: ebcmd == MtoI // bounced eviction will simply be re-sent below
:: else -> assert(false)
fi }
// Send not-yet-sent snoops
:: atomic { ebcmd == ItoM && (enotsent & (1 << i)) -> enotsent = enotsent & ~(1 << i) -> enotrcvd = enotrcvd | (1 << i) -> SQ(i)!ItoM }
// Send not-yet-sent evictions
:: atomic { ebcmd == MtoI && (enotsent & (1 << i)) -> enotsent = enotsent & ~(1 << i) -> enotrcvd = enotrcvd | (1 << i) -> SQ(i)!MtoI }
// Complete requests from peers (sending response if not sent earlier)
:: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource != -1 -> if
:: ebstate == I -> SQ(esource)!I
:: ebstate == M // M was already cut-through-forwarded when received
:: ebstate == U -> SQ(esource)!U
:: else -> assert(false)
fi -> ERESET() }
// Complete locally-originated request when cache space is available to hold the data
// ('||' replaces the original bitwise '|': same value for these 0/1
// comparisons, but logical-or is what is meant and matches the rest of the file)
:: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource == -1 && cstate == I -> if
:: (ebstate == M || ebstate == U) -> cstate = M
:: else -> assert(false)
fi -> ERESET() }
// Simulate local cache being occupied by other address
:: atomic { cstate == I -> cstate = Busy }
:: atomic { cstate == Busy -> cstate = I }
// Simulate evilbuffers conducting transaction for other address
:: atomic { ebcmd == I -> ebcmd = Busy }
:: atomic { ebcmd == Busy -> ebcmd = I }
// Simulate local cache spontaneously evicting
:: atomic { cstate == M && ebcmd == I -> cstate = I -> ebcmd = MtoI -> enotsent = 1 << (npeers-1) }
// If we're not in a transaction assert that the evil buffer is fully reset (just to make sure we've limited reachable states)
:: atomic { ebcmd == Busy || ebcmd == I -> assert(ebstate == I && enotsent == 0 && enotrcvd == 0 && ereverse == 0 && esource == -1) }
// If we're in ItoM transaction assert that it's not in the cache
:: atomic { ebcmd == ItoM -> assert(cstate == I || cstate == Busy) }
// Launch locally-requested fills; rely on deadlock avoidance above to protect from multiple peers simultaneously deciding to do this
:: atomic { cstate != M && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS -> esource = -1 }
// Cycle through connections to all peers to ensure fairness
:: atomic { i = (i + 1) % npeers }
od
#undef ALLPEERS
#undef ERESET
#undef RQ
#undef SQ
}
// C1 C2
// \ /
// C3
// |
// DRAM
//
// [0] is c1-to-c3
// [1] is c2-to-c3
// [2] is dram-to-c3
// [3] is c3-to-c1
// [4] is c3-to-c2
// [5] is c3-to-dram
init
{
// Start order fixes the _pid values referenced in the comments below.
// The (recvqbase, sendqbase, npeers) arguments wire the star topology
// documented above: C1 and C2 each see a single peer (C3); C3 sees
// three peers — C1, C2, and DRAM (the last peer, index npeers-1).
atomic {
run cache(3, 0, 1); // C1 (will be proc 1): rx qs[3], tx qs[0]
run cache(4, 1, 1); // C2 (will be proc 2): rx qs[4], tx qs[1]
run cache(0, 3, 3); // C3 (will be proc 3): rx qs[0..2], tx qs[3..5]
run dram(qs[5], qs[2]); // dram (will be proc 4): rx qs[5], tx qs[2]
}
}
|