summaryrefslogtreecommitdiff
path: root/frontend
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--frontend/bundle.h14
-rw-r--r--frontend/decode.h128
-rw-r--r--frontend/fetch.h79
3 files changed, 221 insertions, 0 deletions
diff --git a/frontend/bundle.h b/frontend/bundle.h
new file mode 100644
index 0000000..cc6f441
--- /dev/null
+++ b/frontend/bundle.h
@@ -0,0 +1,14 @@
#pragma once

#include <cstdint>

#include "infra/pipetrace.h"
#include "memory/line.h"
namespace frontend {
    // One fetched cache line travelling from the fetch stage to decode.
    struct bundle {
        infra::transaction transaction;   // pipetrace transaction tracking this fetch
        unsigned int generation;          // fetch generation tag; decode drops bundles whose tag is stale
        std::uint64_t line_address;       // address (in line-sized units) of the line carried here
        std::uint64_t next_line_address;  // fetch's prediction of the line that follows this one
        memory::line data;                // the line's raw bytes
    };
}
diff --git a/frontend/decode.h b/frontend/decode.h
new file mode 100644
index 0000000..717d0f6
--- /dev/null
+++ b/frontend/decode.h
@@ -0,0 +1,128 @@
#pragma once

#include <cstdint>
#include <cstring>
#include <optional>

#include "frontend/bundle.h"
#include "frontend/fetch.h"
#include "infra/port.h"
#include "inst.h"
#include "memory/line.h"
namespace frontend {
    // Decode stage: consumes fetched line bundles byte-by-byte, assembles
    // variable-length instructions, applies a simple static branch
    // prediction, and restarts fetch whenever the decoded stream leaves the
    // lines fetch is supplying.
    struct decode : public infra::sim {
        // Incoming redirect (writer not visible in this file): switch decode
        // to new_generation/new_pc; from_pc is forwarded so fetch can train
        // its line predictor on the observed transition.
        struct restart {
            unsigned int new_generation;
            std::uint64_t new_pc;
            std::uint64_t from_pc;
        };
        infra::port<restart> restartp;                          // redirects into decode
        infra::port<fetch::restart> *fetch_restartp = nullptr;  // restarts we send upstream to fetch

        infra::port<bundle> bundlep;      // line bundles arriving from fetch
        infra::port<inst> *instp = nullptr;  // decoded instructions to the downstream consumer

        inst next_inst;                   // instruction currently being assembled
        unsigned int generation_up = 0;   // generation tag shared with fetch (stamps bundles)
        unsigned int generation_down = 0; // generation tag shared with downstream (stamps insts)
        std::uint64_t pc = 0;             // byte address of the next byte to decode

        static constexpr unsigned int MAX_INST_SIZE = 64;   // also doubles as the "frontend halted" sentinel below
        static constexpr unsigned int BYTES_PER_CYCLE = 4;  // decode bandwidth in bytes per clock

        // One simulated cycle. Order matters: a redirect cancels all other
        // work this cycle; stale or exhausted bundles are discarded last.
        void clock() {
            if (restartp.can_read()) {
                // Redirect: reset assembly state and restart fetch at the
                // line containing the new pc. Bumping generation_up
                // invalidates every in-flight bundle.
                auto r = restartp.read();
                generation_down = r.new_generation;
                pc = r.new_pc;
                next_inst.size = 0;
                for (auto &f : next_inst.field)
                    f = 0;
                fetch::restart fr;
                fr.new_generation = ++generation_up;
                fr.new_next_line_address = pc >> memory::LINE_BYTES_LOG2;
                fr.previous_line_address = r.from_pc >> memory::LINE_BYTES_LOG2;
                fetch_restartp->write(std::move(fr));
                return;
            }
            // size == MAX_INST_SIZE is the halt sentinel planted after an
            // unpredictable jump; stay stalled until redirected.
            if (next_inst.size >= MAX_INST_SIZE)
                return;
            if (bundlep.can_read()) {
                const auto &b = bundlep.peek();
                for (unsigned int i = 0; i < BYTES_PER_CYCLE; ++i) {
                    auto line = pc >> memory::LINE_BYTES_LOG2;
                    auto offset = pc & memory::LINE_BYTE_OFFSET_MASK;
                    // Consume a byte only from a current-generation bundle
                    // holding the line pc points into, and only when the
                    // downstream port can accept a finished instruction.
                    if (b.generation == generation_up && b.line_address == line && instp->can_write()) {
                        decodebyte byte;
                        std::memcpy(&byte, b.data.data() + offset, sizeof(byte));
                        pte(b.transaction, "d", fmt::format("decode gen={} pc={:x} byte={:02x}", generation_up, pc, *reinterpret_cast<std::uint8_t *>(&byte)));
                        ++next_inst.size;
                        // Each byte contributes 4 bits to one instruction
                        // field; invert complements the field accumulated so
                        // far, before this byte's bits are shifted in.
                        if (byte.invert)
                            next_inst.field[byte.field] = ~next_inst.field[byte.field];
                        next_inst.field[byte.field] = next_inst.field[byte.field] << 4;
                        next_inst.field[byte.field] |= byte.bits;
                        ++pc;
                        if (!byte.hold) {
                            // hold == 0 terminates the instruction:
                            // finalize, predict its successor pc, emit it.
                            next_inst.transaction = infra::pt::child(b.transaction);
                            next_inst.generation = generation_down;
                            next_inst.linear_next_pc = pc;
                            next_inst.predicted_next_pc = pc;
                            pte(next_inst.transaction, "D", fmt::format("decode gen={}", generation_down));
                            bool jump = false;
                            std::optional<std::uint64_t> target;
                            std::optional<bool> taken;
                            // Target and condition are only known at decode
                            // time when encoded as immediates (FLAG_IMM1 for
                            // the target, FLAG_IMM2 for the condition value).
                            switch (next_inst.field[OPCODE]) {
                            case OP_JUMP_ABS_IF_ZERO:
                                jump = true;
                                if (next_inst.field[FLAGS_DST] & FLAG_IMM1)
                                    target = next_inst.field[SRC1];
                                if (next_inst.field[FLAGS_DST] & FLAG_IMM2)
                                    taken = next_inst.field[SRC2] == 0;
                                break;
                            case OP_JUMP_ABS_IF_NONZERO:
                                jump = true;
                                if (next_inst.field[FLAGS_DST] & FLAG_IMM1)
                                    target = next_inst.field[SRC1];
                                if (next_inst.field[FLAGS_DST] & FLAG_IMM2)
                                    taken = next_inst.field[SRC2] != 0;
                                break;
                            }
                            // Static prediction policy: follow a jump whose
                            // outcome is fully known; predict backward jumps
                            // (target < pc) taken; halt the frontend when the
                            // target is unknown but the jump may be taken.
                            std::optional<std::uint64_t> redirect;
                            bool unpredictable = false;
                            if (jump) {
                                if (target.has_value()) {
                                    if (taken.has_value()) {
                                        if (taken.value())
                                            redirect = target;
                                    } else if (target.value() < pc) {
                                        redirect = target;
                                    }
                                } else if (!taken.has_value() || taken.value()) {
                                    unpredictable = true;
                                }
                            }
                            if (redirect.has_value()) {
                                pte(next_inst.transaction, "", fmt::format("fe predicts jump to {:x}", redirect.value()));
                                next_inst.predicted_next_pc = pc = redirect.value();
                            } else if (unpredictable) {
                                pte(next_inst.transaction, "", "frontend halt due to unpredictable jump");
                                next_inst.predicted_next_pc.reset();
                            }
                            instp->write(std::move(next_inst));
                            // Plant the halt sentinel on unpredictable jumps;
                            // otherwise begin assembling the next instruction.
                            next_inst.size = unpredictable ? MAX_INST_SIZE : 0;
                            for (auto &f : next_inst.field)
                                f = 0;
                        }
                    }
                }
                // If pc now points at a line this bundle neither holds nor
                // predicts as next (e.g. after a decode-time redirect above),
                // fetch is streaming the wrong path: restart it at pc's line.
                auto line = pc >> memory::LINE_BYTES_LOG2;
                if (b.generation == generation_up && b.line_address != line && b.next_line_address != line) {
                    fetch::restart fr;
                    fr.new_generation = ++generation_up;
                    fr.new_next_line_address = pc >> memory::LINE_BYTES_LOG2;
                    fr.previous_line_address = b.line_address;
                    fetch_restartp->write(std::move(fr));
                }
                // Drop the bundle once it is stale or fully consumed.
                if (b.generation != generation_up || b.line_address != line)
                    bundlep.discard();
            }
        }
    };
}
diff --git a/frontend/fetch.h b/frontend/fetch.h
new file mode 100644
index 0000000..0eaebd5
--- /dev/null
+++ b/frontend/fetch.h
@@ -0,0 +1,79 @@
1#pragma once
2
3#include <cassert>
4#include <cstdint>
5#include <cstring>
6#include <map>
7#include <optional>
8#include <utility>
9
10#include "frontend/bundle.h"
11#include "infra/pipetrace.h"
12#include "infra/sim.h"
13#include "memory/dram.h"
14
namespace frontend {
    // Fetch stage: streams cache lines from DRAM along a predicted chain of
    // line addresses and forwards them to decode as bundles.
    struct fetch : public infra::sim {
        // Redirect from decode: resteer fetch to new_next_line_address under
        // new_generation; previous_line_address trains the line predictor.
        struct restart {
            unsigned int new_generation;
            std::uint64_t previous_line_address;
            std::uint64_t new_next_line_address;
        };
        infra::port<restart> restartp;

        infra::port<memory::dram::command> *commandp = nullptr; // line fill requests out to DRAM
        infra::port<memory::dram::response> responsep;          // fills coming back from DRAM

        infra::port<bundle> *bundlep = nullptr;                 // fetched lines out to decode

        unsigned int generation = 0;         // tag stamped on every outgoing bundle
        std::uint64_t next_line_address = 0; // next line to request / forward

        // Maps a line address to the (non-sequential) line observed to
        // follow it; lines not in the map are predicted to fall through to
        // line_address + 1.
        // FIXME make prediction table finite
        std::map<std::uint64_t, std::uint64_t> predictor;

        bool fill_request_sent = false;  // a DRAM fill request is outstanding

        // One simulated cycle. Order matters: apply a restart first, then
        // forward a matching fill to decode, then issue the next request,
        // and finally drop stale responses.
        void clock() {
            if (restartp.can_read()) {
                // Resteer and train the predictor on the observed
                // transition; sequential (or same-line) transitions are
                // erased rather than stored, since +1 is the default.
                auto r = restartp.read();
                generation = r.new_generation;
                next_line_address = r.new_next_line_address;
                fill_request_sent = false;
                if (r.new_next_line_address == r.previous_line_address || r.new_next_line_address == r.previous_line_address + 1)
                    predictor.erase(r.previous_line_address);
                else
                    predictor[r.previous_line_address] = r.new_next_line_address;
            }
            if (fill_request_sent && responsep.can_read() && bundlep->can_write()) {
                auto r = responsep.read();
                // A response for some other line (requested before a
                // restart) is consumed here and simply dropped.
                if (r.line_address == next_line_address) {
                    bundle b;
                    b.transaction = r.transaction;
                    b.generation = generation;
                    b.line_address = next_line_address;
                    // Predict the following line: predictor hit wins,
                    // otherwise fall through sequentially.
                    if (auto p = predictor.find(next_line_address); p != predictor.end())
                        b.next_line_address = p->second;
                    else
                        b.next_line_address = next_line_address + 1;
                    next_line_address = b.next_line_address;
                    pte(b.transaction, "", fmt::format("next fetch line {:x}", next_line_address));
                    b.data = std::move(r.data);
                    bundlep->write(std::move(b));
                    fill_request_sent = false;  // lets the block below issue the next request this same cycle
                }
            }
            if (!fill_request_sent && commandp->can_write()) {
                memory::dram::command c;
                c.transaction = infra::pt::toplevel();
                pte(c.transaction, "F", fmt::format("fetch gen={}", generation));
                c.line_address = next_line_address;
                c.responsep = &responsep;
                commandp->write(std::move(c));
                fill_request_sent = true;
            }
            // If no request is outstanding at this point (e.g. just
            // restarted and the command port was full), any waiting response
            // is necessarily stale — drop it.
            if (!fill_request_sent)
                responsep.discard();
        }
    };
}