summaryrefslogtreecommitdiff
path: root/spin/snoopy.pml
blob: 1aff5afc6574e6725695d20f8e4c7f0db5c6ef74 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
// Symbolic values shared by cache-line state, evil-buffer state, and the
// messages exchanged over the qs[] channels. One flat mtype is used for all
// three roles; the per-symbol comments say which roles each value plays.
mtype {
    I, M,               // valid cache line states: invalid, modified
    U, X,               // fill/snoop responses that are not valid cache line states: unknown (from DRAM only), fail (you have to try again)
    ItoM, MtoI,         // commands from peers
    Busy,               // used as both cache line state and evil buffer command to denote activity on behalf of other addresses
}

// DRAM backstop process: always ready, answers every snoop in one atomic step.
//   MtoI (writeback/evict) is acknowledged with I (line absorbed, now invalid);
//   ItoM (fill request)    is answered with U (data present but of unknown
//                          cache-line state — DRAM never claims M).
proctype dram(chan recvq; chan sendq)
{
    do
    :: atomic { recvq?MtoI; sendq!I }
    :: atomic { recvq?ItoM; sendq!U }
    od
}

chan qs[6] = [1] of { mtype };

// One cache agent with npeers point-to-point links. RQ(i)/SQ(i) are its
// receive/send channels toward peer i (qs[recvqbase+i] / qs[sendqbase+i]).
// The "evil buffer" (eb*) tracks at most one in-flight ItoM transaction,
// either locally originated (esource == -1) or on behalf of peer esource.
proctype cache(int recvqbase; int sendqbase; int npeers)
{
#define ALLPEERS ((1 << npeers) - 1)
#define ERESET() atomic { ebcmd = I; ebstate = I; enotsent = 0; enotrcvd = 0; ereverse = 0; esource = -1; }
#define RQ(i) qs[recvqbase+i]
#define SQ(i) qs[sendqbase+i]

    mtype cstate = I;       // I, M, or Busy
    mtype ebcmd = I;        // I, ItoM, or Busy
    int ebstate = I;        // I, M, or U
    int enotsent = 0;       // bitmask of snoops remaining to send
    int enotrcvd = 0;       // bitmask of replies remaining to receive
    int ereverse = 0;       // count of reverse progress
    int esource = -1;       // source of command (-1 is originated locally)
    int i = 0;              // current peer

    do
    // Receive fill request from peer: hand over M directly, forward the snoop
    // to the other peers if idle, or bounce with X if busy.
    :: atomic { RQ(i)?ItoM -> if
        :: cstate == M -> cstate = I -> SQ(i)!M
        :: cstate == I && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS & ~(1 << i) -> esource = i
        :: else -> SQ(i)!X
        fi }

    // Receive fill responses from peers; each reply must correspond to a snoop
    // we actually sent (bit clear in enotsent, set in enotrcvd).
    :: atomic { RQ(i)?I -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) }
    :: atomic { RQ(i)?M -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> ebstate = M -> if
        :: esource != -1 -> SQ(esource)!M
        :: else /* ideally put it into the cache right away */
        fi }
    :: atomic { RQ(i)?U -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> if :: ebstate == I -> ebstate = U :: else fi }
    :: atomic { RQ(i)?X -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> enotsent = enotsent | (1 << i) -> ereverse = ereverse + 1 -> if
        // To prevent deadlock, if we get X'd too many times abandon the transaction and send X to its source
        :: atomic { ereverse > npeers && esource == -1 && enotsent == ALLPEERS -> ERESET() }
        // BUGFIX: `==` binds tighter than `&` in Promela (as in C), so the
        // unparenthesized `enotsent == ALLPEERS & ~(1 << esource)` parsed as
        // `(enotsent == ALLPEERS) & mask`. Since the esource bit is never set
        // in enotsent for a peer-sourced transaction, that guard could never
        // hold and this abort path was unreachable, defeating the deadlock
        // avoidance. Parenthesize the mask so the intended comparison happens.
        :: atomic { ereverse > npeers && esource != -1 && enotsent == (ALLPEERS & ~(1 << esource)) -> SQ(esource)!X -> ERESET() }
        :: else
        fi }

    // Send not-yet-sent snoops
    :: atomic { ebcmd == ItoM && (enotsent & (1 << i)) -> enotsent = enotsent & ~(1 << i) -> enotrcvd = enotrcvd | (1 << i) -> SQ(i)!ItoM }

    // Complete requests from peers (sending response if not sent earlier)
    :: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource != -1 -> if
        :: ebstate == I -> SQ(esource)!I
        :: ebstate == M  // M was already forwarded when it arrived; nothing to send
        :: ebstate == U -> SQ(esource)!U
        :: else -> assert(false)
        fi -> ERESET() }

    // Complete locally-originated request when cache space is available to hold the data
    :: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource == -1 && cstate == I -> if
        // Logical || (was bitwise |): same truth value for these 0/1 operands,
        // but the short-circuit form states the intent.
        :: (ebstate == M || ebstate == U) -> cstate = M
        :: else -> assert(false)
        fi -> ERESET() }

#if 0
    // Simulate local cache being occupied by other address
    :: atomic { if :: cstate == I -> cstate = Busy :: cstate == Busy -> cstate = I :: else fi }

    // Simulate evilbuffers conducting transaction for other address
    :: atomic { if :: ebcmd == I -> ebcmd = Busy :: ebcmd == Busy -> ebcmd = I :: else fi }
#endif

#if 0
    // If we're not in a transaction assert that the evil buffer is fully reset (just to make sure we've limited reachable states)
    :: atomic { ebcmd == Busy || ebcmd == I -> assert(ebstate == I && enotsent == 0 && enotrcvd == 0 && ereverse == 0 && esource == -1) }
    // If we're in ItoM transaction assert that it's not in the cache
    :: atomic { ebcmd == ItoM -> assert(cstate == I || cstate == Busy) }
#endif

    // Launch locally-requested fills; rely on deadlock avoidance above to protect from multiple peers simultaneously deciding to do this
    :: atomic { cstate != M && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS -> esource = -1 }

    // Cycle through connections to all peers to ensure fairness
    :: atomic { i = (i + 1) % npeers }
    od

#undef ALLPEERS
#undef RQ
#undef SQ
}

//  C1   C2
//    \  /
//     C3
//     |
//    DRAM
//
// [0] is c1-to-c3
// [1] is c2-to-c3
// [2] is dram-to-c3
// [3] is c3-to-c1
// [4] is c3-to-c2
// [5] is c3-to-dram

// Wire up the star: two leaf caches (C1, C2) each with one peer (C3), the hub
// cache C3 with three peers (C1, C2, DRAM), and the DRAM backstop. The run
// order fixes the pids noted in the comments; all four are started atomically
// so no process observes a partially constructed system.
init
{
    atomic {
        run cache(3, 0, 1);     // C1   (will be proc 1)
        run cache(4, 1, 1);     // C2   (will be proc 2)
        run cache(0, 3, 3);     // C3   (will be proc 3)
        run dram(qs[5], qs[2]); // dram (will be proc 4)
    }
}