summaryrefslogtreecommitdiff
path: root/spin/snoopy.pml
diff options
context:
space:
mode:
author	Mike Haertel	2023-04-08 11:37:53 -0700
committer	Mike Haertel	2023-04-08 11:37:53 -0700
commit	676e0a0cd57a9b8dcf234609984018ce96f95110 (patch)
tree	be6fc4a17892ab863a6b87ae60f704204c49c0a6 /spin/snoopy.pml
parent	Nefariously add our nefarious plan. (diff)
download	biggolf-676e0a0cd57a9b8dcf234609984018ce96f95110.tar.xz
questionable hacks
Diffstat (limited to 'spin/snoopy.pml')
-rw-r--r--	spin/snoopy.pml	38
1 file changed, 22 insertions, 16 deletions
diff --git a/spin/snoopy.pml b/spin/snoopy.pml
index 4774b1a..1aff5af 100644
--- a/spin/snoopy.pml
+++ b/spin/snoopy.pml
@@ -18,6 +18,7 @@ chan qs[6] = [1] of { mtype };
18proctype cache(int recvqbase; int sendqbase; int npeers) 18proctype cache(int recvqbase; int sendqbase; int npeers)
19{ 19{
20#define ALLPEERS ((1 << npeers) - 1) 20#define ALLPEERS ((1 << npeers) - 1)
21#define ERESET() atomic { ebcmd = I; ebstate = I; enotsent = 0; enotrcvd = 0; ereverse = 0; esource = -1; }
21#define RQ(i) qs[recvqbase+i] 22#define RQ(i) qs[recvqbase+i]
22#define SQ(i) qs[sendqbase+i] 23#define SQ(i) qs[sendqbase+i]
23 24
@@ -47,8 +48,8 @@ proctype cache(int recvqbase; int sendqbase; int npeers)
47 :: atomic { RQ(i)?U -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> if :: ebstate == I -> ebstate = U :: else fi } 48 :: atomic { RQ(i)?U -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> if :: ebstate == I -> ebstate = U :: else fi }
48 :: atomic { RQ(i)?X -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> enotsent = enotsent | (1 << i) -> ereverse = ereverse + 1 -> if 49 :: atomic { RQ(i)?X -> assert(ebcmd == ItoM && (enotsent & (1 << i)) == 0 && (enotrcvd & (1 << i)) != 0) -> enotrcvd = enotrcvd & ~(1 << i) -> enotsent = enotsent | (1 << i) -> ereverse = ereverse + 1 -> if
49 // To prevent deadlock, if we get X'd too many times abandon the transaction and send X to its source 50 // To prevent deadlock, if we get X'd too many times abandon the transaction and send X to its source
50 :: ereverse > npeers && esource == -1 && enotsent == ALLPEERS -> ebcmd = I -> ebstate = I -> enotsent = 0 -> ereverse = 0 -> esource = -1 51 :: atomic { ereverse > npeers && esource == -1 && enotsent == ALLPEERS -> ERESET() }
51 :: ereverse > npeers && esource != -1 && enotsent == ALLPEERS & ~(1 << esource) -> SQ(esource)!X -> ebcmd = I -> ebstate = I -> enotsent = 0 -> ereverse = 0 -> esource = -1 52 :: atomic { ereverse > npeers && esource != -1 && enotsent == ALLPEERS & ~(1 << esource) -> SQ(esource)!X -> ERESET() }
52 :: else 53 :: else
53 fi } 54 fi }
54 55
@@ -61,29 +62,34 @@ proctype cache(int recvqbase; int sendqbase; int npeers)
61 :: ebstate == M 62 :: ebstate == M
62 :: ebstate == U -> SQ(esource)!U 63 :: ebstate == U -> SQ(esource)!U
63 :: else -> assert(false) 64 :: else -> assert(false)
64 fi -> ebcmd = I -> ebstate = I -> ereverse = 0 -> esource = -1 } 65 fi -> ERESET() }
65 66
66 // Complete locally-originated request when cache space is available to hold the data 67 // Complete locally-originated request when cache space is available to hold the data
67 :: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource == -1 && cstate == I -> if 68 :: atomic { ebcmd == ItoM && enotsent == 0 && enotrcvd == 0 && esource == -1 && cstate == I -> if
68 :: (ebstate == M | ebstate == U) -> cstate = M 69 :: (ebstate == M | ebstate == U) -> cstate = M
69 :: else -> assert(false) 70 :: else -> assert(false)
70 fi -> ebcmd = I -> ebstate = I } 71 fi -> ERESET() }
71 72
72 :: else -> atomic { if 73#if 0
73 // Simulate local cache being occupied by other address 74 // Simulate local cache being occupied by other address
74 :: atomic { cstate == I -> cstate = Busy } 75 :: atomic { if :: cstate == I -> cstate = Busy :: cstate == Busy -> cstate = I :: else fi }
75 :: atomic { cstate == Busy -> cstate = I }
76 76
77 // Simulate evilbuffers conducting transaction for other address 77 // Simulate evilbuffers conducting transaction for other address
78 :: atomic { ebcmd == I -> ebcmd = Busy } 78 :: atomic { if :: ebcmd == I -> ebcmd = Busy :: ebcmd == Busy -> ebcmd = I :: else fi }
79 :: atomic { ebcmd == Busy -> ebcmd = I } 79#endif
80 80
81 // Launch locally-requested fills; rely on deadlock avoidance above to protect from multiple peers simultaneously deciding to do this 81#if 0
82 :: atomic { ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS -> enotrcvd = 0 -> ereverse = 0 -> esource = -1 } 82 // If we're not in a transaction assert that the evil buffer is fully reset (just to make sure we've limited reachable states)
83 :: atomic { ebcmd == Busy || ebcmd == I -> assert(ebstate == I && enotsent == 0 && enotrcvd == 0 && ereverse == 0 && esource == -1) }
84 // If we're in ItoM transaction assert that it's not in the cache
85 :: atomic { ebcmd == ItoM -> assert(cstate == I || cstate == Busy) }
86#endif
83 87
84 // Cycle through connections to all peers to ensure fairness 88 // Launch locally-requested fills; rely on deadlock avoidance above to protect from multiple peers simultaneously deciding to do this
85 :: atomic { i = (i + 1) % npeers } 89 :: atomic { cstate != M && ebcmd == I -> ebcmd = ItoM -> enotsent = ALLPEERS -> esource = -1 }
86 fi } 90
91 // Cycle through connections to all peers to ensure fairness
92 :: atomic { i = (i + 1) % npeers }
87 od 93 od
88 94
89#undef ALLPEERS 95#undef ALLPEERS