Compare commits

...

5 Commits

Author SHA1 Message Date
David Anderson 1929bbe3cc vram/VRAM: at last, a video RAM, with all the gubbins 2024-09-08 23:39:12 -07:00
David Anderson fb57903021 vram/VRAMCore: make simulatable in Bluesim, tidy up 2024-09-08 23:19:39 -07:00
David Anderson 79b54ca86f vram/MemArbiter: add a granted_port method to make downstream wiring easier
To implement the mux tree that feeds into RAM ports, we need to know the
port index of the grantee to be able to wire it up. In theory we could
dispense with the per-port grant signal, but keeping it around allows
each client to deal with local concerns separate from the port routing.
2024-09-08 23:16:49 -07:00
David Anderson 2ebf399d62 vram/MemArbiter: remove MemArbiterClient, not needed right now 2024-09-08 22:44:39 -07:00
David Anderson eca95e0fb6 vram/VRAMCore: correct exports of the vram types 2024-09-08 15:37:47 -07:00
4 changed files with 225 additions and 117 deletions
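
The granted_port commit above is what makes the downstream request mux practical: the arbiter reports which port index won, and the mux logic indexes its per-client request FIFOs with it. A minimal sketch of that wiring, assuming a shared ram Server and one request FIFO per client (it mirrors the submit rule in vram/VRAM.bsv further down):

    rule submit_granted_request;
        // granted_port is only ready in cycles where some request was granted.
        let port = arb.granted_port();
        // Route the winning client's request to the shared RAM port.
        ram.request.put(requests[port].first);
        requests[port].deq();
    endrule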

View File

@@ -5,7 +5,6 @@ import Vector::*;
 export MemArbiterOp(..);
 export MemArbiterServer(..);
-export MemArbiterClient(..);
 export MemArbiter(..), mkPriorityMemArbiter, mkRoundRobinMemArbiter;

 // A MemArbiterOp is an operation that a client is seeking permission

@@ -15,6 +14,8 @@ typedef struct {
     addr addr;
 } MemArbiterOp#(type addr) deriving (Bits, Eq, FShow);

+// mem_ops_conflict reports whether memory accesses a and b would
+// cause undefined behavior if they proceed simultaneously.
 function Bool mem_ops_conflict(Maybe#(MemArbiterOp#(addr)) a, Maybe#(MemArbiterOp#(addr)) b)
     provisos(Eq#(addr));

@@ -31,58 +32,39 @@ interface MemArbiterServer#(type addr);
     method Bool grant();
 endinterface

-// A MemArbiterClient emits requests and receives grants.
-interface MemArbiterClient#(type addr);
-    method Maybe#(MemArbiterOp#(addr)) request();
-    method Action grant();
-endinterface
-
-// Arbiter clients and servers can be connected in the obvious way.
-instance Connectable#(MemArbiterClient#(addr), MemArbiterServer#(addr));
-    module mkConnection(MemArbiterClient#(addr) client, MemArbiterServer#(addr) server, Empty ifc);
-        rule send_request (client.request matches tagged Valid .req);
-            server.request(req);
-        endrule
-
-        rule send_grant (server.grant());
-            client.grant();
-        endrule
-    endmodule
-endinstance
-
 // A MemArbiter manages concurrent access to a memory port.
 interface MemArbiter#(numeric type num_clients, type addr);
     // ports allow clients to request memory access.
     interface Vector#(num_clients, MemArbiterServer#(addr)) ports;

+    // granted_port returns the index in ports of the client that is
+    // being granted its request.
+    method UInt#(TLog#(num_clients)) granted_port();
+
     // The following methods are to support arbiter chaining.
     //
-    // Suppose you're arbitrating access to a dual-port
-    // memory. Typically, such a memory specifies that if one port is
-    // writing to an address, the other must not concurrently read or
-    // write that same address. This means the arbiters attached to
-    // each memory port must cooperate to avoid simultaneously granting
+    // Suppose you're arbitrating access to a dual-port memory.
+    // Typically, such a memory specifies that if one port is writing
+    // to an address, the other must not concurrently read or write
+    // that same address. This means the arbiters attached to each
+    // memory port must cooperate to avoid simultaneously granting
     // conflicting requests from their clients.
     //
-    // Calling conflict prevents the arbiter from granting a concurrent
-    // request that would result in a write-write, read-write or
-    // write-read conflict. granted_op emits the operation that the
-    // arbiter is granting, if any.
+    // conflict_in supplies an already granted operation that this
+    // arbiter must avoid conflicting with. conflict_out emits the
+    // operation that the arbiter is granting, if any.
     //
-    // MemArbiter intances are Connectable: mkConnection(a, b) gives
-    // conflict priority to a. That is, b only grants requests that
-    // don't conflict with a's grant.
+    // mkConnection(firstArbiter, secondArbiter) gives conflict
+    // priority to firstArbiter. That is, secondArbiter only grants
+    // requests that don't conflict with grants made by firstArbiter.
     (* always_ready *)
-    method Action conflict(MemArbiterOp#(addr) conflict);
-    method MemArbiterOp#(addr) granted_op();
+    method Action conflict_in(MemArbiterOp#(addr) conflict);
+    method MemArbiterOp#(addr) conflict_out();
 endinterface

 instance Connectable#(MemArbiter#(m, addr), MemArbiter#(n, addr));
     module mkConnection(MemArbiter#(m, addr) a, MemArbiter#(n, addr) b, Empty ifc);
-        (* fire_when_enabled *)
-        rule forward_conflict;
-            b.conflict(a.granted_op);
-        endrule
+        mkConnection(a.conflict_out, b.conflict_in);
     endmodule
 endinstance

@@ -91,13 +73,14 @@ endinstance
 module mkPriorityMemArbiter(MemArbiter#(num_clients, addr))
     provisos (Bits#(addr, _),
               Eq#(addr),
-              Min#(num_clients, 1, 1));
+              Min#(num_clients, 1, 1),
+              Alias#(client_idx, UInt#(TLog#(num_clients))));
     Vector#(num_clients, RWire#(MemArbiterOp#(addr))) reqs <- replicateM(mkRWire());
     Wire#(Vector#(num_clients, Bool)) grants <- mkBypassWire();
-    RWire#(MemArbiterOp#(addr)) conflict_in <- mkRWire();
-    RWire#(MemArbiterOp#(addr)) granted_op_out <- mkRWire();
+    RWire#(MemArbiterOp#(addr)) conflict_op <- mkRWire();
+    RWire#(client_idx) granted_idx <- mkRWire();

     (* no_implicit_conditions, fire_when_enabled *)
     rule grant_requests;

@@ -106,11 +89,11 @@ module mkPriorityMemArbiter(MemArbiter#(num_clients, addr))
         for (Integer i=0; i<valueOf(num_clients); i=i+1) begin
             if (reqs[i].wget() matches tagged Valid .req &&&
-                !mem_ops_conflict(conflict_in.wget(), reqs[i].wget()) &&&
+                !mem_ops_conflict(conflict_op.wget(), reqs[i].wget()) &&&
                 !done) begin
                 done = True;
                 grant[i] = True;
-                granted_op_out.wset(req);
+                granted_idx.wset(fromInteger(i));
             end
         end

@@ -125,24 +108,29 @@ module mkPriorityMemArbiter(MemArbiter#(num_clients, addr))
     endinterface);

     interface ports = _ifcs;
-    method conflict = conflict_in.wset;
-    method MemArbiterOp#(addr) granted_op() if (granted_op_out.wget() matches tagged Valid .op);
+    method client_idx granted_port() if (granted_idx.wget() matches tagged Valid .idx);
+        return idx;
+    endmethod
+    method MemArbiterOp#(addr) conflict_out() if (granted_idx.wget() matches tagged Valid .idx &&&
+                                                  reqs[idx].wget() matches tagged Valid .op);
         return op;
     endmethod
+    method conflict_in = conflict_op.wset;
 endmodule

 typedef struct {
     Vector#(n, Bool) grant_vec;
-    Maybe#(MemArbiterOp#(addr)) granted_op;
+    Maybe#(UInt#(TLog#(n))) granted_idx;
 } GrantResult#(numeric type n, type addr) deriving (Bits, Eq, FShow);

 // select_grant computes which one entry of requests should be
 // granted. Priority order is descending starting from
 // requests[hipri].
 function GrantResult#(n, addr) select_grant(Vector#(n, Maybe#(MemArbiterOp#(addr))) requests,
-                                            UInt#(TLog#(n)) hipri,
+                                            client_idx hipri,
                                             Maybe#(MemArbiterOp#(addr)) conflict)
-    provisos (Eq#(addr));
+    provisos (Eq#(addr),
+              Alias#(client_idx, UInt#(TLog#(n))));

     function onehot(idx);
         let ret = replicate(False);

@@ -151,13 +139,15 @@ function GrantResult#(n, addr) select_grant(Vector#(n, Maybe#(MemArbiterOp#(addr
     endfunction

     function GrantResult#(n, addr) do_fold(GrantResult#(n, addr) acc,
-                                           Tuple2#(UInt#(TLog#(n)),
+                                           Tuple2#(client_idx,
                                                    Maybe#(MemArbiterOp#(addr))) next);
         match {.idx, .mreq} = next;
-        if (mreq matches tagged Valid .req &&& acc.granted_op matches tagged Invalid &&& !mem_ops_conflict(conflict, mreq))
+        if (mreq matches tagged Valid .req &&&
+            acc.granted_idx matches tagged Invalid &&&
+            !mem_ops_conflict(conflict, mreq))
             return GrantResult{
                 grant_vec: onehot(idx),
-                granted_op: tagged Valid req
+                granted_idx: tagged Valid idx
             };
         else
             // Previous grant won, not requesting, or request not satisfiable.

@@ -168,7 +158,7 @@ function GrantResult#(n, addr) select_grant(Vector#(n, Maybe#(MemArbiterOp#(addr
     let rot = rotateBy(in, fromInteger(valueOf(n)-1)-hipri+1);
     let seed = GrantResult{
         grant_vec: replicate(False),
-        granted_op: tagged Invalid
+        granted_idx: tagged Invalid
     };
     return foldl(do_fold, seed, rot);
 endfunction

@@ -176,19 +166,20 @@ endfunction
 module mkRoundRobinMemArbiter(MemArbiter#(num_clients, addr))
     provisos (Bits#(addr, _),
               Eq#(addr),
-              Min#(num_clients, 1, 1));
+              Min#(num_clients, 1, 1),
+              Alias#(client_idx, UInt#(TLog#(num_clients))));
     Vector#(num_clients, RWire#(MemArbiterOp#(addr))) reqs <- replicateM(mkRWire);
     Wire#(Vector#(num_clients, Bool)) grants <- mkBypassWire();
-    RWire#(MemArbiterOp#(addr)) conflict_in <- mkRWire();
-    RWire#(MemArbiterOp#(addr)) granted_op_out <- mkRWire();
+    RWire#(MemArbiterOp#(addr)) conflict_op <- mkRWire();
+    RWire#(client_idx) granted_idx_out <- mkRWire();

     // high_prio is the index of the client that should be first in
     // line to receive access. Every time we grant access to a client,
     // the one after that in sequence becomes high_prio in the next
     // round.
-    Reg#(UInt#(TLog#(num_clients))) high_prio <- mkReg(0);
+    Reg#(client_idx) high_prio <- mkReg(0);

     function Maybe#(_t) get_mreq(RWire#(_t) w);
         return w.wget();

@@ -196,11 +187,11 @@ module mkRoundRobinMemArbiter(MemArbiter#(num_clients, addr))
     rule grant;
         let in = map(get_mreq, reqs);
-        let res = select_grant(in, high_prio, conflict_in.wget());
+        let res = select_grant(in, high_prio, conflict_op.wget());
         grants <= res.grant_vec;
-        if (res.granted_op matches tagged Valid .op) begin
-            granted_op_out.wset(op);
+        if (res.granted_idx matches tagged Valid .idx) begin
+            granted_idx_out.wset(idx);
             high_prio <= validValue(findElem(True, rotateR(res.grant_vec)));
         end
     endrule

@@ -213,10 +204,14 @@ module mkRoundRobinMemArbiter(MemArbiter#(num_clients, addr))
     endinterface);

     interface ports = _ifcs;
-    method conflict = conflict_in.wset;
-    method MemArbiterOp#(addr) granted_op() if (granted_op_out.wget() matches tagged Valid .op);
+    method client_idx granted_port() if (granted_idx_out.wget() matches tagged Valid .idx);
+        return idx;
+    endmethod
+    method MemArbiterOp#(addr) conflict_out() if (granted_idx_out.wget() matches tagged Valid .idx &&&
+                                                  reqs[idx].wget() matches tagged Valid .op);
        return op;
     endmethod
+    method conflict_in = conflict_op.wset;
 endmodule

 endpackage
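
With MemArbiterClient and its Connectable instance gone, a client now drives a MemArbiterServer port directly: assert request whenever there is work, and commit the memory operation only in a cycle where grant is high. A hedged sketch, where the pending FIFO and the raw ram.put method are hypothetical stand-ins for a real client:

    rule request_access (pending.notEmpty);
        arb.ports[0].request(MemArbiterOp { write: True, addr: pending.first.addr });
    endrule

    rule do_write (arb.ports[0].grant());
        // Touch memory only in the cycle this port actually won arbitration.
        ram.put(True, pending.first.addr, pending.first.data);
        pending.deq();
    endrule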

View File

@@ -19,7 +19,7 @@ typedef struct {
     Maybe#(MemArbiterOp#(Addr)) conflict;
     Vector#(n, Bool) want_grants;
-    Maybe#(MemArbiterOp#(Addr)) want_granted_op;
+    Maybe#(MemArbiterOp#(Addr)) want_conflict_out;
 } TestCase#(numeric type n) deriving (Bits, Eq);

 function Maybe#(MemArbiterOp#(Addr)) read(Addr addr);

@@ -54,13 +54,13 @@ function TestCase#(n) testCase(String name,
                               Vector#(n, Maybe#(MemArbiterOp#(Addr))) reqs,
                               Maybe#(MemArbiterOp#(Addr)) conflict,
                               Vector#(n, Bool) want_grants,
-                              Maybe#(MemArbiterOp#(Addr)) want_granted_op);
+                              Maybe#(MemArbiterOp#(Addr)) want_conflict_out);
     return TestCase{
         name: name,
         reqs: reqs,
         conflict: conflict,
         want_grants: want_grants,
-        want_granted_op: want_granted_op
+        want_conflict_out: want_conflict_out
     };
 endfunction

@@ -84,14 +84,14 @@ module mkArbiterTB(MemArbiter#(n, Addr) dut, Vector#(m, TestCase#(n)) tests, TB
     (* no_implicit_conditions, fire_when_enabled *)
     rule forbid (running && isValid(tests[idx].conflict));
-        dut.conflict(validValue(tests[idx].conflict));
+        dut.conflict_in(validValue(tests[idx].conflict));
     endrule

-    Wire#(Maybe#(MemArbiterOp#(Addr))) got_granted_op <- mkDWire(tagged Invalid);
+    Wire#(Maybe#(MemArbiterOp#(Addr))) got_conflict_out <- mkDWire(tagged Invalid);
     (* fire_when_enabled *)
-    rule collect_granted_op (running);
-        got_granted_op <= tagged Valid dut.granted_op();
+    rule collect_conflict_out (running);
+        got_conflict_out <= tagged Valid dut.conflict_out();
     endrule

     function Fmt req_s(Maybe#(MemArbiterOp#(Addr)) v);

@@ -107,13 +107,13 @@ module mkArbiterTB(MemArbiter#(n, Addr) dut, Vector#(m, TestCase#(n)) tests, TB
         let test = tests[idx];
         let reqs = test.reqs;
         let want_grants = test.want_grants;
-        let want_granted_op = test.want_granted_op;
+        let want_conflict_out = test.want_conflict_out;
         Vector#(n, Bool) got_grants = newVector;
         for (Integer i=0; i<valueOf(n); i=i+1)
             got_grants[i] = dut.ports[i].grant();

         $display("RUN %s (%0d)", tests[idx].name, idx);
-        if (got_grants != want_grants || got_granted_op != want_granted_op) begin
+        if (got_grants != want_grants || got_conflict_out != want_conflict_out) begin
             $display("input:");
             for (Integer i=0; i<valueOf(n); i=i+1)
                 $display(" ", $format("%0d", i), ": ", req_s(reqs[i]));

@@ -121,10 +121,10 @@ module mkArbiterTB(MemArbiter#(n, Addr) dut, Vector#(m, TestCase#(n)) tests, TB
             $display(" output:");
             $display(" grants: ", fshow(got_grants));
-            $display(" granted: ", fshow(got_granted_op));
+            $display(" granted: ", fshow(got_conflict_out));
             $display(" want grants: ", fshow(tests[idx].want_grants));
-            $display(" want granted: ", fshow(want_granted_op));
+            $display(" want granted: ", fshow(want_conflict_out));
             dynamicAssert(False, "wrong arbiter output");
         end
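
For orientation, each test vector in this testbench pairs per-client requests with the grant vector and conflict_out value the arbiter is expected to produce. A sketch of one extra case (read is the helper defined earlier in this file; the two-client vector literal and the literal MemArbiterOp conflict are illustrative only):

    // With an external write to 'h2 already granted elsewhere, a read of 'h2
    // must be refused: no grant bits set and no conflict_out emitted.
    TestCase#(2) blocked = testCase("conflict blocks grant",
                                    cons(read('h2), cons(tagged Invalid, nil)),
                                    tagged Valid MemArbiterOp { write: True, addr: 'h2 },
                                    replicate(False),
                                    tagged Invalid);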

vram/VRAM.bsv Normal file
View File

@@ -0,0 +1,99 @@
package VRAM;

import Connectable::*;
import GetPut::*;
import ClientServer::*;
import Vector::*;
import FIFOF::*;
import SpecialFIFOs::*;

import MemArbiter::*;
import VRAMCore::*;

// Re-exports from VRAMCore
export VRAMAddr, VRAMData, VRAMRequest(..), VRAMResponse(..);
export VRAMServer(..);
export VRAM(..), mkVRAM;

typedef Server#(VRAMRequest, VRAMResponse) VRAMServer;

// mkArbitratedVRAMServers expands a VRAMServer port into multiple
// ports through the use of a MemArbiter.
module mkArbitratedVRAMServers(VRAMServer ram, MemArbiter#(n, VRAMAddr) arb, Vector#(n, VRAMServer) ifc)
    provisos (Min#(n, 1, 1),
              Alias#(port_idx, UInt#(TLog#(n))));
    Vector#(n, FIFOF#(VRAMRequest)) requests <- replicateM(mkBypassFIFOF());
    Vector#(n, FIFOF#(VRAMResponse)) responses <- replicateM(mkBypassFIFOF());
    Reg#(Maybe#(port_idx)) awaiting_response[2] <- mkCReg(2, tagged Invalid);

    (* fire_when_enabled *)
    rule request_ports;
        for (Integer i=0; i<valueOf(n); i=i+1)
            if (requests[i].notEmpty) begin
                let req = requests[i].first;
                let arb_req = MemArbiterOp{
                    write: isValid(req.data),
                    addr: req.addr
                };
                arb.ports[i].request(arb_req);
            end
    endrule

    (* fire_when_enabled *)
    rule submit (awaiting_response[1] matches tagged Invalid);
        let port = arb.granted_port();
        ram.request.put(requests[port].first);
        requests[port].deq();
        awaiting_response[1] <= tagged Valid port;
    endrule

    (* fire_when_enabled *)
    rule response (awaiting_response[0] matches tagged Valid .port &&& responses[port].notFull);
        let resp <- ram.response.get();
        responses[port].enq(resp);
        awaiting_response[0] <= tagged Invalid;
    endrule

    return map(uncurry(toGPServer), zip(requests, responses));
endmodule

// VRAM is a GARY video RAM and its memory ports.
interface VRAM;
    interface VRAMServer cpu;
    interface VRAMServer debugger;
    interface VRAMServer palette;
    interface VRAMServer tile1;
    interface VRAMServer tile2;
    interface VRAMServer sprite;
endinterface

// mkVRAM constructs a VRAM of the requested size. Memory access is
// spread across two internal ports as follows:
//
// Port A: strict most-important-wins priority: CPU, then debugger,
// then palette DAC.
// Port B: equal round-robin prioritization between two tile engines
// and the sprite engine.
module mkVRAM(Integer num_kilobytes, VRAM ifc);
    VRAMCore ram <- mkVRAMCore(num_kilobytes);

    MemArbiter#(3, VRAMAddr) arbA <- mkPriorityMemArbiter();
    Vector#(3, VRAMServer) portA <- mkArbitratedVRAMServers(ram.portA, arbA);

    MemArbiter#(3, VRAMAddr) arbB <- mkRoundRobinMemArbiter();
    Vector#(3, VRAMServer) portB <- mkArbitratedVRAMServers(ram.portB, arbB);

    // Connect up the arbiters so they correctly prevent write-write
    // and write-read conflicts.
    mkConnection(arbA, arbB);

    interface cpu = portA[0];
    interface debugger = portA[1];
    interface palette = portA[2];
    interface tile1 = portB[0];
    interface tile2 = portB[1];
    interface sprite = portB[2];
endmodule

endpackage
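
As a usage sketch, a top-level design would instantiate the memory once and hang its bus masters off the named ports; the client names below are hypothetical, each assumed to be a Client#(VRAMRequest, VRAMResponse):

    VRAM vram <- mkVRAM(128);                 // 128 KiB of video RAM
    mkConnection(cpu_bus, vram.cpu);          // CPU wins priority arbitration on port A
    mkConnection(tile_fetcher, vram.tile1);   // tile/sprite engines share port B round-robin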

View File

@@ -1,48 +1,82 @@
 package VRAMCore;

-import Connectable::*;
 import GetPut::*;
 import ClientServer::*;
-import DReg::*;
-import BRAM::*;
-import Vector::*;
-import FIFOF::*;
-import SpecialFIFOs::*;
+import BRAMCore::*;
 import Real::*;
-import Printf::*;
 import DelayLine::*;
 import ECP5_RAM::*;

 export VRAMAddr;
 export VRAMData;
-export VRAMRequest;
-export VRAMResponse;
-export VRAMClient;
-export VRAMServer;
-export VRAMCore;
+export VRAMRequest(..);
+export VRAMResponse(..);
+export VRAMCore(..);
 export mkVRAMCore;

 typedef Bit#(8) VRAMData;
-// Each byte RAM we build below can address 4096 bytes, which is 12
-// address bits.
-typedef UInt#(12) ByteAddr;
+typedef UInt#(17) VRAMAddr;
+typedef UInt#(2) ArrayAddr;
 typedef UInt#(3) ChipAddr;
+typedef UInt#(12) ByteAddr;

 // ByteRAM is two EBRs glued together to make a whole-byte memory.
 typedef EBR#(ByteAddr, VRAMData, ByteAddr, VRAMData) ByteRAM;

+typedef struct {
+    VRAMAddr addr;
+    Maybe#(VRAMData) data;
+} VRAMRequest deriving (Bits, Eq);
+
+typedef struct {
+    VRAMData data;
+} VRAMResponse deriving (Bits, Eq);
+
+module mkNibbleRAM_ECP5(ChipAddr chip_addr, EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) ifc);
+    EBRPortConfig cfg = defaultValue;
+    cfg.chip_select_addr = chip_addr;
+    let _ret <- mkEBRCore(cfg, cfg);
+    return _ret;
+endmodule
+
+module mkNibbleRAM_Sim(ChipAddr chip_addr, EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) ifc);
+    BRAM_DUAL_PORT#(ByteAddr, Bit#(4)) ram <- mkBRAMCore2(4096, False);
+
+    interface EBRPort portA;
+        method Action put(UInt#(3) chip_select, Bool write, ByteAddr address, Bit#(4) datain);
+            if (chip_select == chip_addr)
+                ram.a.put(write, address, datain);
+        endmethod
+        method read = ram.a.read;
+    endinterface
+
+    interface EBRPort portB;
+        method Action put(UInt#(3) chip_select, Bool write, ByteAddr address, Bit#(4) datain);
+            if (chip_select == chip_addr)
+                ram.b.put(write, address, datain);
+        endmethod
+        method read = ram.b.read;
+    endinterface
+endmodule
+
+module mkNibbleRAM(ChipAddr chip_addr, EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) ifc);
+    let _ret;
+    if (genC())
+        _ret <- mkNibbleRAM_Sim(chip_addr);
+    else
+        _ret <- mkNibbleRAM_ECP5(chip_addr);
+    return _ret;
+endmodule
+
 // mkByteRAM glues two ECP5 EBRs together to make a 4096x8b memory
 // block. Like the underlying ECP5 EBRs, callers must bring their own
 // flow control to read out responses one cycle after putting a read
 // request.
 module mkByteRAM(ChipAddr chip_addr, ByteRAM ifc);
-    EBRPortConfig cfg = defaultValue;
-    cfg.chip_select_addr = chip_addr;
-    EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) upper <- mkEBRCore(cfg, cfg);
-    EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) lower <- mkEBRCore(cfg, cfg);
+    EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) upper <- mkNibbleRAM(chip_addr);
+    EBR#(ByteAddr, Bit#(4), ByteAddr, Bit#(4)) lower <- mkNibbleRAM(chip_addr);

     interface EBRPort portA;
         method Action put(ChipAddr chip_select, Bool write, ByteAddr addr, VRAMData data_in);

@@ -119,25 +153,9 @@ module mkByteRAMArray(Integer num_chips, ByteRAM ifc);
     endinterface
 endmodule

-typedef UInt#(2) ArrayAddr;
-typedef UInt#(17) VRAMAddr;
-
-typedef struct {
-    VRAMAddr addr;
-    Maybe#(VRAMData) data;
-} VRAMRequest deriving (Bits, Eq);
-
-typedef struct {
-    VRAMData data;
-} VRAMResponse deriving (Bits, Eq);
-
-typedef Server#(VRAMRequest, VRAMResponse) VRAMServer;
-typedef Client#(VRAMRequest, VRAMResponse) VRAMClient;
-
 interface VRAMCore;
-    interface VRAMServer portA;
-    interface VRAMServer portB;
+    interface Server#(VRAMRequest, VRAMResponse) portA;
+    interface Server#(VRAMRequest, VRAMResponse) portB;
 endinterface

 // mkVRAMCore creates a dual port VRAM of the specified size, using

@@ -163,11 +181,7 @@ module mkVRAMCore(Integer num_kilobytes, VRAMCore ifc);
     let num_arrays = ceil(fromInteger(num_byterams) / 8);

     function Tuple3#(ArrayAddr, ChipAddr, ByteAddr) split_addr(VRAMAddr a);
-        if (num_bytes < 128*1024)
-            a = a % fromInteger(num_bytes);
-        match {.top, .byteaddr} = split(pack(a));
-        Tuple2#(Bit#(SizeOf#(ArrayAddr)), Bit#(SizeOf#(ChipAddr))) route = split(top);
-        return tuple3(unpack(tpl_1(route)), unpack(tpl_2(route)), unpack(byteaddr));
+        return unpack(pack(a));
     endfunction

     ByteRAM arrays[num_arrays];

@@ -179,7 +193,7 @@ module mkVRAMCore(Integer num_kilobytes, VRAMCore ifc);
     Reg#(Maybe#(ArrayAddr)) inflight_A[2] <- mkCReg(2, tagged Invalid);
     Reg#(Maybe#(ArrayAddr)) inflight_B[2] <- mkCReg(2, tagged Invalid);

-    interface VRAMServer portA;
+    interface Server portA;
         interface Put request;
             method Action put(VRAMRequest req) if (inflight_A[1] matches tagged Invalid);
                 match {.array, .chip, .byteaddr} = split_addr(req.addr);

@@ -196,7 +210,7 @@ module mkVRAMCore(Integer num_kilobytes, VRAMCore ifc);
         endinterface
     endinterface

-    interface VRAMServer portB;
+    interface Server portB;
         interface Put request;
             method Action put(VRAMRequest req) if (inflight_B[1] matches tagged Invalid);
                 match {.array, .chip, .byteaddr} = split_addr(req.addr);