summaryrefslogtreecommitdiff
path: root/frontend/decode.h
diff options
context:
space:
mode:
authorJulian Blake Kongslie2022-09-22 11:29:07 -0700
committerJulian Blake Kongslie2022-09-22 11:29:07 -0700
commiteb3fd68203fee7c63245c702914c2acd3332d65a (patch)
tree7796707c0372e7fbe4a8bac70aad95f619e8ba29 /frontend/decode.h
downloadprocmodel-eb3fd68203fee7c63245c702914c2acd3332d65a.tar.xz
Initial commit.
Diffstat (limited to '')
-rw-r--r--frontend/decode.h128
1 files changed, 128 insertions, 0 deletions
diff --git a/frontend/decode.h b/frontend/decode.h
new file mode 100644
index 0000000..717d0f6
--- /dev/null
+++ b/frontend/decode.h
@@ -0,0 +1,128 @@
#pragma once

#include <cstdint>
#include <cstring>
#include <optional>

#include "frontend/bundle.h"
#include "frontend/fetch.h"
#include "infra/port.h"
#include "inst.h"
#include "memory/line.h"

namespace frontend {
  // Decode stage of the simulated frontend pipeline.  Consumes fetched
  // line bundles one byte at a time, accumulates instruction fields, and
  // emits completed `inst` records downstream.  Also performs simple
  // static prediction for absolute conditional jumps and re-steers fetch
  // when decode moves off the line fetch is currently streaming.
  struct decode : public infra::sim {
    // Restart request from downstream (e.g. misprediction recovery):
    // adopt `new_generation` for emitted instructions and resume decoding
    // at `new_pc`.  `from_pc` is where the restart originated; its line
    // is handed to fetch as the previous line address.
    struct restart {
      unsigned int new_generation;
      std::uint64_t new_pc;
      std::uint64_t from_pc;
    };
    infra::port<restart> restartp;                         // restarts arriving from downstream
    infra::port<fetch::restart> *fetch_restartp = nullptr; // restarts sent to fetch; wired up externally

    infra::port<bundle> bundlep;        // fetched line bundles from the fetch stage
    infra::port<inst> *instp = nullptr; // decoded instructions to downstream; wired up externally

    inst next_inst;                   // instruction currently being assembled, byte by byte
    unsigned int generation_up = 0;   // generation tag shared with fetch (bumped on every fetch restart)
    unsigned int generation_down = 0; // generation tag stamped onto emitted instructions
    std::uint64_t pc = 0;             // byte address of the next byte to decode

    // MAX_INST_SIZE doubles as a sentinel: setting next_inst.size to it
    // stalls decode (early return below) until a downstream restart.
    static constexpr unsigned int MAX_INST_SIZE = 64;
    static constexpr unsigned int BYTES_PER_CYCLE = 4; // decode throughput per cycle

    void clock() {
      // A downstream restart consumes the whole cycle: adopt the new
      // downstream generation and pc, drop the partially-assembled
      // instruction, and re-steer fetch under a fresh upstream generation.
      if (restartp.can_read()) {
        auto r = restartp.read();
        generation_down = r.new_generation;
        pc = r.new_pc;
        next_inst.size = 0;
        for (auto &f : next_inst.field)
          f = 0;
        fetch::restart fr;
        fr.new_generation = ++generation_up;
        fr.new_next_line_address = pc >> memory::LINE_BYTES_LOG2;
        fr.previous_line_address = r.from_pc >> memory::LINE_BYTES_LOG2;
        fetch_restartp->write(std::move(fr));
        return;
      }
      // Decode is parked (unpredictable-jump sentinel, set below) until a
      // restart clears next_inst.size.
      if (next_inst.size >= MAX_INST_SIZE)
        return;
      if (bundlep.can_read()) {
        const auto &b = bundlep.peek();
        // Consume up to BYTES_PER_CYCLE bytes from the bundle.  Each byte
        // is a `decodebyte`: it selects an instruction field, shifts 4 new
        // bits into that field's low nibble (optionally one's-complementing
        // the accumulated value first via `invert`), and its `hold` bit
        // says whether more bytes of this instruction follow.
        // NOTE(review): next_inst.size is only bounds-checked between
        // cycles, so a held instruction can run a few bytes past
        // MAX_INST_SIZE within one cycle — looks benign since `size` is
        // never used as an index here; confirm against `inst`'s layout.
        for (unsigned int i = 0; i < BYTES_PER_CYCLE; ++i) {
          auto line = pc >> memory::LINE_BYTES_LOG2;
          auto offset = pc & memory::LINE_BYTE_OFFSET_MASK;
          // Only decode while the bundle is current (generation and line
          // both match pc) and downstream can accept an instruction.
          if (b.generation == generation_up && b.line_address == line && instp->can_write()) {
            decodebyte byte;
            std::memcpy(&byte, b.data.data() + offset, sizeof(byte));
            // pte(): pipeline trace event — presumably provided by infra::sim.
            pte(b.transaction, "d", fmt::format("decode gen={} pc={:x} byte={:02x}", generation_up, pc, *reinterpret_cast<std::uint8_t *>(&byte)));
            ++next_inst.size;
            if (byte.invert)
              next_inst.field[byte.field] = ~next_inst.field[byte.field];
            next_inst.field[byte.field] = next_inst.field[byte.field] << 4;
            next_inst.field[byte.field] |= byte.bits;
            ++pc;
            if (!byte.hold) {
              // Last byte of the instruction: stamp bookkeeping and emit.
              next_inst.transaction = infra::pt::child(b.transaction);
              next_inst.generation = generation_down;
              next_inst.linear_next_pc = pc;
              next_inst.predicted_next_pc = pc;
              pte(next_inst.transaction, "D", fmt::format("decode gen={}", generation_down));
              // Static prediction for absolute conditional jumps.  The
              // target/condition is only known at decode time when the
              // corresponding immediate flag is set in FLAGS_DST.
              bool jump = false;
              std::optional<std::uint64_t> target;
              std::optional<bool> taken;
              switch (next_inst.field[OPCODE]) {
                case OP_JUMP_ABS_IF_ZERO:
                  jump = true;
                  if (next_inst.field[FLAGS_DST] & FLAG_IMM1)
                    target = next_inst.field[SRC1];
                  if (next_inst.field[FLAGS_DST] & FLAG_IMM2)
                    taken = next_inst.field[SRC2] == 0;
                  break;
                case OP_JUMP_ABS_IF_NONZERO:
                  jump = true;
                  if (next_inst.field[FLAGS_DST] & FLAG_IMM1)
                    target = next_inst.field[SRC1];
                  if (next_inst.field[FLAGS_DST] & FLAG_IMM2)
                    taken = next_inst.field[SRC2] != 0;
                  break;
              }
              std::optional<std::uint64_t> redirect;
              bool unpredictable = false;
              if (jump) {
                if (target.has_value()) {
                  if (taken.has_value()) {
                    // Target and condition both known: redirect exactly
                    // when the jump is statically taken.
                    if (taken.value())
                      redirect = target;
                  } else if (target.value() < pc) {
                    // Condition unknown: redirect only for backward
                    // targets (backward-taken heuristic).
                    redirect = target;
                  }
                } else if (!taken.has_value() || taken.value()) {
                  // Target unknown and the jump may be taken: decode has
                  // no way to guess the next pc.
                  unpredictable = true;
                }
              }
              if (redirect.has_value()) {
                pte(next_inst.transaction, "", fmt::format("fe predicts jump to {:x}", redirect.value()));
                next_inst.predicted_next_pc = pc = redirect.value();
              } else if (unpredictable) {
                pte(next_inst.transaction, "", "frontend halt due to unpredictable jump");
                next_inst.predicted_next_pc.reset();
              }
              instp->write(std::move(next_inst));
              // Reset for the next instruction; on an unpredictable jump,
              // park decode via the MAX_INST_SIZE sentinel until a
              // downstream restart redirects us.
              next_inst.size = unpredictable ? MAX_INST_SIZE : 0;
              for (auto &f : next_inst.field)
                f = 0;
            }
          }
        }
        // If pc now sits on a line that is neither this bundle's line nor
        // the one fetch will deliver next (e.g. after a predicted jump),
        // re-steer fetch to pc's line.
        auto line = pc >> memory::LINE_BYTES_LOG2;
        if (b.generation == generation_up && b.line_address != line && b.next_line_address != line) {
          fetch::restart fr;
          fr.new_generation = ++generation_up;
          fr.new_next_line_address = pc >> memory::LINE_BYTES_LOG2;
          fr.previous_line_address = b.line_address;
          fetch_restartp->write(std::move(fr));
        }
        // Drop the bundle once it is stale (old generation) or fully
        // consumed (pc has left its line).
        if (b.generation != generation_up || b.line_address != line)
          bundlep.discard();
      }
    }
  };
}