summaryrefslogtreecommitdiff
path: root/spin/snoopy.pml
diff options
context:
space:
mode:
Diffstat (limited to 'spin/snoopy.pml')
-rw-r--r--spin/snoopy.pml115
1 file changed, 115 insertions, 0 deletions
diff --git a/spin/snoopy.pml b/spin/snoopy.pml
new file mode 100644
index 0000000..4774b1a
--- /dev/null
+++ b/spin/snoopy.pml
@@ -0,0 +1,115 @@
// Symbolic values shared by three overlapping roles: cache line states,
// fill/snoop response messages, and peer-to-peer commands.
mtype {
	I, M, // valid cache line states: invalid, modified
	U, X, // fill/snoop responses that are not valid cache line states: unknown (from DRAM only), fail (you have to try again)
	ItoM, MtoI, // commands from peers: invalid-to-modified (fill request), modified-to-invalid (writeback)
	Busy, // used as both cache line state and evil buffer command to denote activity on behalf of other addresses
}
7
/*
 * Memory model: forever answer each peer command arriving on recvq with
 * the matching response on sendq.
 *
 *   MtoI (writeback)    -> I (line invalid everywhere)
 *   ItoM (fill request) -> U (data of unknown state; only DRAM sends U)
 */
proctype dram(chan recvq; chan sendq)
{
	do
	:: atomic { recvq?MtoI -> sendq!I }
	:: atomic { recvq?ItoM -> sendq!U }
	od
}
15
16chan qs[6] = [1] of { mtype };
17
/*
 * One coherence agent: a single-line cache (cstate) plus an "evil
 * buffer" (ebcmd/ebstate) that tracks at most one in-flight fill
 * transaction for this address.
 *
 * recvqbase/sendqbase index into the global qs[] array: RQ(i)/SQ(i)
 * are the inbound/outbound channels for peer i (0 <= i < npeers).
 * For the leaf caches the single peer is the hub; for the hub the
 * peers are the leaves plus DRAM.
 *
 * Snooping protocol: an ItoM transaction fans snoops out to every peer
 * except its source, collects one reply per snoop (I/M/U/X), and
 * forwards the best answer back to the source (or installs it locally
 * when the transaction originated here, esource == -1).  X means "try
 * again"; the ereverse counter bounds retries so two agents that keep
 * X'ing each other eventually abandon instead of deadlocking.
 */
proctype cache(int recvqbase; int sendqbase; int npeers)
{
#define ALLPEERS ((1 << npeers) - 1)
#define RQ(i) qs[recvqbase+i]
#define SQ(i) qs[sendqbase+i]

	mtype cstate = I;  // cache line state: I, M, or Busy
	mtype ebcmd = I;   // evil buffer command: I, ItoM, or Busy
	int ebstate = I;   // best fill data seen so far: I, M, or U
	int enotsent = 0;  // bitmask of snoops remaining to send
	int enotrcvd = 0;  // bitmask of replies remaining to receive
	int ereverse = 0;  // count of reverse progress (X replies seen)
	int esource = -1;  // source of command (-1 is originated locally)
	int i = 0;         // current peer

	do
	// Receive fill request from peer: hand over a modified line,
	// start a forwarded snoop if idle, otherwise make them retry.
	:: atomic { RQ(i)?ItoM -> if
		:: cstate == M -> cstate = I -> SQ(i)!M
		:: cstate == I && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS & ~(1 << i) -> esource = i
		:: else -> SQ(i)!X
		fi }

	// Receive fill responses from peers; each reply must match an
	// outstanding snoop (sent, not yet answered).
	:: atomic { RQ(i)?I -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) }
	:: atomic { RQ(i)?M -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> ebstate = M -> if
		:: esource != -1 -> SQ(esource)!M // forward modified data eagerly
		:: else /* ideally put it into the cache right away */
		fi }
	// U (from DRAM) only upgrades an I result; M always wins.
	:: atomic { RQ(i)?U -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> if :: ebstate == I -> ebstate = U :: else fi }
	// X: peer asked us to retry; requeue the snoop and count the setback.
	:: atomic { RQ(i)?X -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> enotsent = enotsent | (1 << i) -> ereverse = ereverse + 1 -> if
		// To prevent deadlock, if we get X'd too many times abandon the transaction and send X to its source.
		// "enotsent == full snoop set" means every outstanding snoop has bounced back.
		:: ereverse > npeers && esource == -1 && enotsent == ALLPEERS -> ebcmd = I -> ebstate = I -> enotsent = 0 -> ereverse = 0 -> esource = -1
		// NOTE: the mask must be parenthesized — in Promela (as in C) `==`
		// binds tighter than `&`, so the unparenthesized form compared
		// enotsent against ALLPEERS first and this branch could never
		// fire when esource == 0, defeating the deadlock avoidance.
		:: ereverse > npeers && esource != -1 && enotsent == (ALLPEERS & ~(1 << esource)) -> SQ(esource)!X -> ebcmd = I -> ebstate = I -> enotsent = 0 -> ereverse = 0 -> esource = -1
		:: else
		fi }

	// Send not-yet-sent snoops (one per step, to the current peer).
	:: atomic { ebcmd == ItoM && (enotsent & (1 << i)) -> enotsent = enotsent & ~(1 << i) -> enotrcvd = enotrcvd | (1 << i) -> SQ(i)!ItoM }

	// Complete requests from peers (sending response if not sent earlier;
	// M was already forwarded eagerly when it arrived).
	:: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource != -1 -> if
		:: ebstate == I -> SQ(esource)!I
		:: ebstate == M
		:: ebstate == U -> SQ(esource)!U
		:: else -> assert(false)
		fi -> ebcmd = I -> ebstate = I -> ereverse = 0 -> esource = -1 }

	// Complete locally-originated request when cache space is available to hold the data.
	:: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource == -1 && cstate == I -> if
		:: (ebstate == M || ebstate == U) -> cstate = M
		:: else -> assert(false)
		fi -> ebcmd = I -> ebstate = I }

	// Only when nothing above is executable: background churn and fairness.
	:: else -> atomic { if
		// Simulate local cache being occupied by other address
		:: atomic { cstate == I -> cstate = Busy }
		:: atomic { cstate == Busy -> cstate = I }

		// Simulate evilbuffers conducting transaction for other address
		:: atomic { ebcmd == I -> ebcmd = Busy }
		:: atomic { ebcmd == Busy -> ebcmd = I }

		// Launch locally-requested fills; rely on deadlock avoidance above to protect from multiple peers simultaneously deciding to do this
		:: atomic { ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS -> enotrcvd = 0 -> ereverse = 0 -> esource = -1 }

		// Cycle through connections to all peers to ensure fairness
		:: atomic { i = (i + 1) % npeers }
		fi }
	od

#undef ALLPEERS
#undef RQ
#undef SQ
}
93
94// C1 C2
95// \ /
96// C3
97// |
98// DRAM
99//
100// [0] is c1-to-c3
101// [1] is c2-to-c3
102// [2] is dram-to-c3
103// [3] is c3-to-c1
104// [4] is c3-to-c2
105// [5] is c3-to-dram
106
// Instantiate the topology drawn above: C1 and C2 are leaves, C3 is
// the hub connected to both leaves and to DRAM.  Process ids depend on
// run order, so do not reorder these statements.
init
{
	atomic {
		run cache(3, 0, 1); // C1 (will be proc 1): recv qs[3], send qs[0], 1 peer (C3)
		run cache(4, 1, 1); // C2 (will be proc 2): recv qs[4], send qs[1], 1 peer (C3)
		run cache(0, 3, 3); // C3 (will be proc 3): recv qs[0..2], send qs[3..5], 3 peers
		run dram(qs[5], qs[2]); // dram (will be proc 4): commands in on qs[5], replies out on qs[2]
	}
}