Diffstat (limited to 'VexRiscv/src/main/scala/vexriscv/demo')
37 files changed, 6584 insertions, 0 deletions
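The files added below are standalone generator objects; following the sbt pattern shown in the Linux.scala notes further down, any of them can be elaborated with a command of the form (GenFull is one of the objects in this diff, any other demo object works the same way):

sbt "runMain vexriscv.demo.GenFull"

which, by default, writes the generated Verilog for the VexRiscv toplevel (VexRiscv.v) into the directory sbt is run from.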
diff --git a/VexRiscv/src/main/scala/vexriscv/demo/Briey.scala b/VexRiscv/src/main/scala/vexriscv/demo/Briey.scala new file mode 100644 index 0000000..32e6d62 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/Briey.scala @@ -0,0 +1,490 @@ +package vexriscv.demo + + +import vexriscv.plugin._ +import vexriscv._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.amba3.apb._ +import spinal.lib.bus.amba4.axi._ +import spinal.lib.com.jtag.Jtag +import spinal.lib.com.jtag.sim.JtagTcp +import spinal.lib.com.uart.sim.{UartDecoder, UartEncoder} +import spinal.lib.com.uart.{Apb3UartCtrl, Uart, UartCtrlGenerics, UartCtrlMemoryMappedConfig} +import spinal.lib.graphic.RgbConfig +import spinal.lib.graphic.vga.{Axi4VgaCtrl, Axi4VgaCtrlGenerics, Vga} +import spinal.lib.io.TriStateArray +import spinal.lib.memory.sdram.SdramGeneration.SDR +import spinal.lib.memory.sdram._ +import spinal.lib.memory.sdram.sdr.sim.SdramModel +import spinal.lib.memory.sdram.sdr.{Axi4SharedSdramCtrl, IS42x320D, SdramInterface, SdramTimings} +import spinal.lib.misc.HexTools +import spinal.lib.soc.pinsec.{PinsecTimerCtrl, PinsecTimerCtrlExternal} +import spinal.lib.system.debugger.{JtagAxi4SharedDebugger, JtagBridge, SystemDebugger, SystemDebuggerConfig} + +import scala.collection.mutable.ArrayBuffer +import scala.collection.Seq + +case class BrieyConfig(axiFrequency : HertzNumber, + onChipRamSize : BigInt, + sdramLayout: SdramLayout, + sdramTimings: SdramTimings, + cpuPlugins : ArrayBuffer[Plugin[VexRiscv]], + uartCtrlConfig : UartCtrlMemoryMappedConfig) + +object BrieyConfig{ + + def default = { + val config = BrieyConfig( + axiFrequency = 50 MHz, + onChipRamSize = 4 kB, + sdramLayout = IS42x320D.layout, + sdramTimings = IS42x320D.timingGrade7, + uartCtrlConfig = UartCtrlMemoryMappedConfig( + uartCtrlConfig = UartCtrlGenerics( + dataWidthMax = 8, + clockDividerWidth = 20, + preSamplingSize = 1, + samplingSize = 5, + postSamplingSize = 2 + ), + txFifoDepth = 16, + rxFifoDepth = 16 + ), + cpuPlugins = ArrayBuffer( + new PcManagerSimplePlugin(0x80000000l, false), + // new IBusSimplePlugin( + // interfaceKeepData = false, + // catchAccessFault = true + // ), + new IBusCachedPlugin( + resetVector = 0x80000000l, + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ) + // askMemoryTranslation = true, + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 4 + // ) + ), + // new DBusSimplePlugin( + // catchAddressMisaligned = true, + // catchAccessFault = true + // ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + memoryTranslatorPortConfig = null + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 6 + // ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new 
FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig( + catchIllegalAccess = false, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = null, + misaExtensionsInit = 66, + misaAccess = CsrAccess.NONE, + mtvecAccess = CsrAccess.NONE, + mtvecInit = 0x80000020l, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = false, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = false, + wfiGenAsWait = false, + ucycleAccess = CsrAccess.NONE, + uinstretAccess = CsrAccess.NONE + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + config + } +} + + + +class Briey(val config: BrieyConfig) extends Component{ + + //Legacy constructor + def this(axiFrequency: HertzNumber) { + this(BrieyConfig.default.copy(axiFrequency = axiFrequency)) + } + + import config._ + val debug = true + val interruptCount = 4 + def vgaRgbConfig = RgbConfig(5,6,5) + + val io = new Bundle{ + //Clocks / reset + val asyncReset = in Bool() + val axiClk = in Bool() + val vgaClk = in Bool() + + //Main components IO + val jtag = slave(Jtag()) + val sdram = master(SdramInterface(sdramLayout)) + + //Peripherals IO + val gpioA = master(TriStateArray(32 bits)) + val gpioB = master(TriStateArray(32 bits)) + val uart = master(Uart()) + val vga = master(Vga(vgaRgbConfig)) + val timerExternal = in(PinsecTimerCtrlExternal()) + val coreInterrupt = in Bool() + } + + val resetCtrlClockDomain = ClockDomain( + clock = io.axiClk, + config = ClockDomainConfig( + resetKind = BOOT + ) + ) + + val resetCtrl = new ClockingArea(resetCtrlClockDomain) { + val systemResetUnbuffered = False + // val coreResetUnbuffered = False + + //Implement an counter to keep the reset axiResetOrder high 64 cycles + // Also this counter will automaticly do a reset when the system boot. 
+ val systemResetCounter = Reg(UInt(6 bits)) init(0) + when(systemResetCounter =/= U(systemResetCounter.range -> true)){ + systemResetCounter := systemResetCounter + 1 + systemResetUnbuffered := True + } + when(BufferCC(io.asyncReset)){ + systemResetCounter := 0 + } + + //Create all reset used later in the design + val systemReset = RegNext(systemResetUnbuffered) + val axiReset = RegNext(systemResetUnbuffered) + val vgaReset = BufferCC(axiReset) + } + + val axiClockDomain = ClockDomain( + clock = io.axiClk, + reset = resetCtrl.axiReset, + frequency = FixedFrequency(axiFrequency) //The frequency information is used by the SDRAM controller + ) + + val debugClockDomain = ClockDomain( + clock = io.axiClk, + reset = resetCtrl.systemReset, + frequency = FixedFrequency(axiFrequency) + ) + + val vgaClockDomain = ClockDomain( + clock = io.vgaClk, + reset = resetCtrl.vgaReset + ) + + val axi = new ClockingArea(axiClockDomain) { + val ram = Axi4SharedOnChipRam( + dataWidth = 32, + byteCount = onChipRamSize, + idWidth = 4 + ) + + val sdramCtrl = Axi4SharedSdramCtrl( + axiDataWidth = 32, + axiIdWidth = 4, + layout = sdramLayout, + timing = sdramTimings, + CAS = 3 + ) + + + val apbBridge = Axi4SharedToApb3Bridge( + addressWidth = 20, + dataWidth = 32, + idWidth = 4 + ) + + val gpioACtrl = Apb3Gpio( + gpioWidth = 32, + withReadSync = true + ) + val gpioBCtrl = Apb3Gpio( + gpioWidth = 32, + withReadSync = true + ) + val timerCtrl = PinsecTimerCtrl() + + + val uartCtrl = Apb3UartCtrl(uartCtrlConfig) + uartCtrl.io.apb.addAttribute(Verilator.public) + + + val vgaCtrlConfig = Axi4VgaCtrlGenerics( + axiAddressWidth = 32, + axiDataWidth = 32, + burstLength = 8, + frameSizeMax = 2048*1512*2, + fifoSize = 512, + rgbConfig = vgaRgbConfig, + vgaClock = vgaClockDomain + ) + val vgaCtrl = Axi4VgaCtrl(vgaCtrlConfig) + + + + val core = new Area{ + val config = VexRiscvConfig( + plugins = cpuPlugins += new DebugPlugin(debugClockDomain) + ) + + val cpu = new VexRiscv(config) + var iBus : Axi4ReadOnly = null + var dBus : Axi4Shared = null + for(plugin <- config.plugins) plugin match{ + case plugin : IBusSimplePlugin => iBus = plugin.iBus.toAxi4ReadOnly() + case plugin : IBusCachedPlugin => iBus = plugin.iBus.toAxi4ReadOnly() + case plugin : DBusSimplePlugin => dBus = plugin.dBus.toAxi4Shared() + case plugin : DBusCachedPlugin => dBus = plugin.dBus.toAxi4Shared(true) + case plugin : CsrPlugin => { + plugin.externalInterrupt := BufferCC(io.coreInterrupt) + plugin.timerInterrupt := timerCtrl.io.interrupt + } + case plugin : DebugPlugin => debugClockDomain{ + resetCtrl.axiReset setWhen(RegNext(plugin.io.resetOut)) + io.jtag <> plugin.io.bus.fromJtag() + } + case _ => + } + } + + + val axiCrossbar = Axi4CrossbarFactory() + + axiCrossbar.addSlaves( + ram.io.axi -> (0x80000000L, onChipRamSize), + sdramCtrl.io.axi -> (0x40000000L, sdramLayout.capacity), + apbBridge.io.axi -> (0xF0000000L, 1 MB) + ) + + axiCrossbar.addConnections( + core.iBus -> List(ram.io.axi, sdramCtrl.io.axi), + core.dBus -> List(ram.io.axi, sdramCtrl.io.axi, apbBridge.io.axi), + vgaCtrl.io.axi -> List( sdramCtrl.io.axi) + ) + + + axiCrossbar.addPipelining(apbBridge.io.axi)((crossbar,bridge) => { + crossbar.sharedCmd.halfPipe() >> bridge.sharedCmd + crossbar.writeData.halfPipe() >> bridge.writeData + crossbar.writeRsp << bridge.writeRsp + crossbar.readRsp << bridge.readRsp + }) + + axiCrossbar.addPipelining(sdramCtrl.io.axi)((crossbar,ctrl) => { + crossbar.sharedCmd.halfPipe() >> ctrl.sharedCmd + crossbar.writeData >/-> ctrl.writeData + crossbar.writeRsp << 
ctrl.writeRsp + crossbar.readRsp << ctrl.readRsp + }) + + axiCrossbar.addPipelining(ram.io.axi)((crossbar,ctrl) => { + crossbar.sharedCmd.halfPipe() >> ctrl.sharedCmd + crossbar.writeData >/-> ctrl.writeData + crossbar.writeRsp << ctrl.writeRsp + crossbar.readRsp << ctrl.readRsp + }) + + axiCrossbar.addPipelining(vgaCtrl.io.axi)((ctrl,crossbar) => { + ctrl.readCmd.halfPipe() >> crossbar.readCmd + ctrl.readRsp << crossbar.readRsp + }) + + axiCrossbar.addPipelining(core.dBus)((cpu,crossbar) => { + cpu.sharedCmd >> crossbar.sharedCmd + cpu.writeData >> crossbar.writeData + cpu.writeRsp << crossbar.writeRsp + cpu.readRsp <-< crossbar.readRsp //Data cache directly use read responses without buffering, so pipeline it for FMax + }) + + axiCrossbar.build() + + + val apbDecoder = Apb3Decoder( + master = apbBridge.io.apb, + slaves = List( + gpioACtrl.io.apb -> (0x00000, 4 kB), + gpioBCtrl.io.apb -> (0x01000, 4 kB), + uartCtrl.io.apb -> (0x10000, 4 kB), + timerCtrl.io.apb -> (0x20000, 4 kB), + vgaCtrl.io.apb -> (0x30000, 4 kB) + ) + ) + } + + io.gpioA <> axi.gpioACtrl.io.gpio + io.gpioB <> axi.gpioBCtrl.io.gpio + io.timerExternal <> axi.timerCtrl.io.external + io.uart <> axi.uartCtrl.io.uart + io.sdram <> axi.sdramCtrl.io.sdram + io.vga <> axi.vgaCtrl.io.vga +} + +//DE1-SoC +object Briey{ + def main(args: Array[String]) { + val config = SpinalConfig() + config.generateVerilog({ + val toplevel = new Briey(BrieyConfig.default) + toplevel.axi.vgaCtrl.vga.ctrl.io.error.addAttribute(Verilator.public) + toplevel.axi.vgaCtrl.vga.ctrl.io.frameStart.addAttribute(Verilator.public) + toplevel + }) + } +} + +//DE1-SoC with memory init +object BrieyWithMemoryInit{ + def main(args: Array[String]) { + val config = SpinalConfig() + config.generateVerilog({ + val toplevel = new Briey(BrieyConfig.default) + toplevel.axi.vgaCtrl.vga.ctrl.io.error.addAttribute(Verilator.public) + toplevel.axi.vgaCtrl.vga.ctrl.io.frameStart.addAttribute(Verilator.public) + HexTools.initRam(toplevel.axi.ram.ram, "src/main/ressource/hex/muraxDemo.hex", 0x80000000l) + toplevel + }) + } +} + + +//DE0-Nano +object BrieyDe0Nano{ + def main(args: Array[String]) { + object IS42x160G { + def layout = SdramLayout( + generation = SDR, + bankWidth = 2, + columnWidth = 9, + rowWidth = 13, + dataWidth = 16 + ) + + def timingGrade7 = SdramTimings( + bootRefreshCount = 8, + tPOW = 100 us, + tREF = 64 ms, + tRC = 60 ns, + tRFC = 60 ns, + tRAS = 37 ns, + tRP = 15 ns, + tRCD = 15 ns, + cMRD = 2, + tWR = 10 ns, + cWR = 1 + ) + } + val config = SpinalConfig() + config.generateVerilog({ + val toplevel = new Briey(BrieyConfig.default.copy(sdramLayout = IS42x160G.layout)) + toplevel + }) + } +} + + + +import spinal.core.sim._ +object BrieySim { + def main(args: Array[String]): Unit = { + val simSlowDown = false + SimConfig.allOptimisation.compile(new Briey(BrieyConfig.default)).doSimUntilVoid{dut => + val mainClkPeriod = (1e12/dut.config.axiFrequency.toDouble).toLong + val jtagClkPeriod = mainClkPeriod*4 + val uartBaudRate = 115200 + val uartBaudPeriod = (1e12/uartBaudRate).toLong + + val clockDomain = ClockDomain(dut.io.axiClk, dut.io.asyncReset) + clockDomain.forkStimulus(mainClkPeriod) + + val tcpJtag = JtagTcp( + jtag = dut.io.jtag, + jtagClkPeriod = jtagClkPeriod + ) + + val uartTx = UartDecoder( + uartPin = dut.io.uart.txd, + baudPeriod = uartBaudPeriod + ) + + val uartRx = UartEncoder( + uartPin = dut.io.uart.rxd, + baudPeriod = uartBaudPeriod + ) + + val sdram = SdramModel( + dut.io.sdram, + dut.config.sdramLayout, + clockDomain + ) + + 
dut.io.coreInterrupt #= false + } + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/CustomCsrDemoPlugin.scala b/VexRiscv/src/main/scala/vexriscv/demo/CustomCsrDemoPlugin.scala new file mode 100644 index 0000000..a763c83 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/CustomCsrDemoPlugin.scala @@ -0,0 +1,63 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib.io.TriStateArray +import spinal.lib.{Flow, master} +import vexriscv.plugin.{CsrInterface, Plugin} +import vexriscv.{DecoderService, Stageable, VexRiscv} + + + +class CustomCsrDemoPlugin extends Plugin[VexRiscv]{ + override def build(pipeline: VexRiscv): Unit = { + import pipeline._ + import pipeline.config._ + + pipeline plug new Area{ + val instructionCounter = Reg(UInt(32 bits)) + val cycleCounter = Reg(UInt(32 bits)) + + cycleCounter := cycleCounter + 1 + when(writeBack.arbitration.isFiring) { + instructionCounter := instructionCounter + 1 + } + + val csrService = pipeline.service(classOf[CsrInterface]) + csrService.rw(0xB04, instructionCounter) + csrService.r(0xB05, cycleCounter) + csrService.onWrite(0xB06){ + instructionCounter := 0 + } + csrService.onRead(0xB07){ + instructionCounter := 0x40000000 + } + } + } +} + + +class CustomCsrDemoGpioPlugin extends Plugin[VexRiscv]{ + var gpio : TriStateArray = null + + + override def setup(pipeline: VexRiscv): Unit = { + gpio = master(TriStateArray(32 bits)).setName("gpio") + } + + override def build(pipeline: VexRiscv): Unit = { + import pipeline._ + import pipeline.config._ + + pipeline plug new Area{ + val writeReg, writeEnableReg = Reg(Bits(32 bits)) + + val csrService = pipeline.service(classOf[CsrInterface]) + csrService.rw(0xB08, writeReg) + csrService.rw(0xB09, writeEnableReg) + csrService.r(0xB0A, gpio.read) + + gpio.writeEnable := writeEnableReg + gpio.write := writeReg + } + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/CustomInstruction.scala b/VexRiscv/src/main/scala/vexriscv/demo/CustomInstruction.scala new file mode 100644 index 0000000..dc35997 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/CustomInstruction.scala @@ -0,0 +1,75 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin.Plugin +import vexriscv.{Stageable, DecoderService, VexRiscv} + +//This plugin example will add a new instruction named SIMD_ADD which do the following : +// +//RD : Regfile Destination, RS : Regfile Source +//RD( 7 downto 0) = RS1( 7 downto 0) + RS2( 7 downto 0) +//RD(16 downto 8) = RS1(16 downto 8) + RS2(16 downto 8) +//RD(23 downto 16) = RS1(23 downto 16) + RS2(23 downto 16) +//RD(31 downto 24) = RS1(31 downto 24) + RS2(31 downto 24) +// +//Instruction encoding : +//0000011----------000-----0110011 +// |RS2||RS1| |RD | +// +//Note : RS1, RS2, RD positions follow the RISC-V spec and are common for all instruction of the ISA + +class SimdAddPlugin extends Plugin[VexRiscv]{ + //Define the concept of IS_SIMD_ADD signals, which specify if the current instruction is destined for ths plugin + object IS_SIMD_ADD extends Stageable(Bool) + + //Callback to setup the plugin and ask for different services + override def setup(pipeline: VexRiscv): Unit = { + import pipeline.config._ + + //Retrieve the DecoderService instance + val decoderService = pipeline.service(classOf[DecoderService]) + + //Specify the IS_SIMD_ADD default value when instruction are decoded + decoderService.addDefault(IS_SIMD_ADD, False) + + //Specify the instruction decoding which should be applied when the instruction match the 'key' parttern + 
decoderService.add( + //Bit pattern of the new SIMD_ADD instruction + key = M"0000011----------000-----0110011", + + //Decoding specification when the 'key' pattern is recognized in the instruction + List( + IS_SIMD_ADD -> True, + REGFILE_WRITE_VALID -> True, //Enable the register file write + BYPASSABLE_EXECUTE_STAGE -> True, //Notify the hazard management unit that the instruction result is already accessible in the EXECUTE stage (Bypass ready) + BYPASSABLE_MEMORY_STAGE -> True, //Same as above but for the memory stage + RS1_USE -> True, //Notify the hazard management unit that this instruction use the RS1 value + RS2_USE -> True //Same than above but for RS2. + ) + ) + } + + override def build(pipeline: VexRiscv): Unit = { + import pipeline._ + import pipeline.config._ + + //Add a new scope on the execute stage (used to give a name to signals) + execute plug new Area { + //Define some signals used internally to the plugin + val rs1 = execute.input(RS1).asUInt //32 bits UInt value of the regfile[RS1] + val rs2 = execute.input(RS2).asUInt + val rd = UInt(32 bits) + + //Do some computation + rd(7 downto 0) := rs1(7 downto 0) + rs2(7 downto 0) + rd(16 downto 8) := rs1(16 downto 8) + rs2(16 downto 8) + rd(23 downto 16) := rs1(23 downto 16) + rs2(23 downto 16) + rd(31 downto 24) := rs1(31 downto 24) + rs2(31 downto 24) + + //When the instruction is a SIMD_ADD one, then write the result into the register file data path. + when(execute.input(IS_SIMD_ADD)) { + execute.output(REGFILE_WRITE_DATA) := rd.asBits + } + } + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/FormalSimple.scala b/VexRiscv/src/main/scala/vexriscv/demo/FormalSimple.scala new file mode 100644 index 0000000..9a4167e --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/FormalSimple.scala @@ -0,0 +1,65 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object FormalSimple extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new FormalPlugin, + new HaltOnExceptionPlugin, + new IBusSimplePlugin( + resetVector = 0x00000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = DYNAMIC_TARGET, + catchAccessFault = false, + compressedGen = true + ), + new DBusSimplePlugin( + catchAddressMisaligned = true, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true, + forceLegalInstructionComputation = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = false, + bypassMemory = false, + bypassWriteBack = false, + bypassWriteBackBuffer = false, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + SpinalConfig( + defaultConfigForClockDomains = ClockDomainConfig( + resetKind = spinal.core.SYNC, + resetActiveLevel = spinal.core.HIGH + ) + ).generateVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenCustomCsr.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomCsr.scala new file mode 100644 index 0000000..11db86d --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomCsr.scala @@ -0,0 +1,62 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. + */ + +//make clean run DBUS=SIMPLE IBUS=SIMPLE CSR=no MMU=no DEBUG_PLUGIN=no MUL=no DIV=no CUSTOM_CSR=yes +object GenCustomCsr extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new CustomCsrDemoPlugin, + new CsrPlugin(CsrPluginConfig.small), + new CustomCsrDemoGpioPlugin, + new IBusSimplePlugin( + resetVector = 0x00000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenCustomInterrupt.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomInterrupt.scala new file mode 100644 index 0000000..d0d9e48 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomInterrupt.scala @@ -0,0 +1,72 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenCustomInterrupt extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new UserInterruptPlugin( + interruptName = "miaou", + code = 20 + ), + new UserInterruptPlugin( + interruptName = "rawrrr", + code = 24 + ), + new CsrPlugin( + CsrPluginConfig.smallest.copy( + xtvecModeGen = true, + mtvecAccess = CsrAccess.WRITE_ONLY + ) + ), + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenCustomSimdAdd.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomSimdAdd.scala new file mode 100644 index 0000000..8d9d6be --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenCustomSimdAdd.scala @@ -0,0 +1,58 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. + */ +object GenCustomSimdAdd extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new SimdAddPlugin, + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenDeterministicVex.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenDeterministicVex.scala new file mode 100644 index 0000000..943ba16 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenDeterministicVex.scala @@ -0,0 +1,66 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenDeterministicVex extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = STATIC, + catchAccessFault = true, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = true, + catchAccessFault = true, + earlyInjection = false + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin(earlyInjection = true), + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = true, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenFull.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenFull.scala new file mode 100644 index 0000000..eb1dba3 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenFull.scala @@ -0,0 +1,92 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenFull extends App{ + def config = VexRiscvConfig( + plugins = List( + new IBusCachedPlugin( + prediction = DYNAMIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = 4 + ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = 6 + ) + ), + new MmuPlugin( + virtualRange = _(31 downto 28) === 0xC, + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small(0x80000020l)), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + def cpu() = new VexRiscv( + config + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmu.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmu.scala new file mode 100644 index 0000000..00ba8c9 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmu.scala @@ -0,0 +1,87 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenFullNoMmu extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new PcManagerSimplePlugin( + resetVector = 0x80000000l, + relaxedPcCalculation = false + ), + new IBusCachedPlugin( + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuMaxPerf.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuMaxPerf.scala new file mode 100644 index 0000000..6c892f0 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuMaxPerf.scala @@ -0,0 +1,88 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenFullNoMmuMaxPerf extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new PcManagerSimplePlugin( + resetVector = 0x80000000l, + relaxedPcCalculation = false + ), + new IBusCachedPlugin( + prediction = DYNAMIC_TARGET, + historyRamSizeLog2 = 8, + config = InstructionCacheConfig( + cacheSize = 4096*2, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = false, + twoCycleCache = true + ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096*2, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin(earlyInjection = true), + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCache.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCache.scala new file mode 100644 index 0000000..77ed87a --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCache.scala @@ -0,0 +1,63 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenFullNoMmuNoCache extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = STATIC, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCacheSimpleMul.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCacheSimpleMul.scala new file mode 100644 index 0000000..f1e9874 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenFullNoMmuNoCacheSimpleMul.scala @@ -0,0 +1,63 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. + */ +object GenFullNoMmuNoCacheSimpleMul extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = STATIC, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulSimplePlugin, + new DivPlugin, + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenMicroNoCsr.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenMicroNoCsr.scala new file mode 100644 index 0000000..bcd1b77 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenMicroNoCsr.scala @@ -0,0 +1,61 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenMicroNoCsr extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + withMemoryStage = false, + withWriteBackStage = false, + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false, + earlyInjection = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false, + writeRfInMemoryStage = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = false, + bypassMemory = false, + bypassWriteBack = false, + bypassWriteBackBuffer = false, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = true, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + SpinalConfig(mergeAsyncProcess = false).generateVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenNoCacheNoMmuMaxPerf.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenNoCacheNoMmuMaxPerf.scala new file mode 100644 index 0000000..9bca107 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenNoCacheNoMmuMaxPerf.scala @@ -0,0 +1,68 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. + */ +object GenNoCacheNoMmuMaxPerf extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = DYNAMIC_TARGET, + historyRamSizeLog2 = 8, + catchAccessFault = true, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = true, + catchAccessFault = true, + earlyInjection = false + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin(earlyInjection = true), + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new MulDivIterativePlugin(genMul = false, genDiv = true, mulUnrollFactor = 1, divUnrollFactor = 1,dhrystoneOpt = false), + new CsrPlugin(CsrPluginConfig.small), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSecure.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSecure.scala new file mode 100644 index 0000000..8b2cd55 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSecure.scala @@ -0,0 +1,87 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import 
vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +object GenSecure extends App { + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusCachedPlugin( + resetVector = 0x80000000l, + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ) + ), + new PmpPlugin( + regions = 16, + granularity = 32, + ioRange = _(31 downto 28) === 0xf + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulDivIterativePlugin( + genMul = true, + genDiv = true, + mulUnrollFactor = 1, + divUnrollFactor = 1 + ), + new CsrPlugin(CsrPluginConfig.secure(0x00000020l)), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductive.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductive.scala new file mode 100644 index 0000000..9bd6f72 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductive.scala @@ -0,0 +1,59 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenSmallAndProductive extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveCfu.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveCfu.scala new file mode 100644 index 0000000..d28e318 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveCfu.scala @@ -0,0 +1,87 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. + */ +object GenSmallAndProductiveCfu extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new CfuPlugin( + stageCount = 1, + allowZeroLatency = true, + encodings = List( + CfuPluginEncoding ( + instruction = M"-------------------------0001011", + functionId = List(14 downto 12), + input2Kind = CfuPlugin.Input2Kind.RS + ) + ), + busParameter = CfuBusParameter( + CFU_VERSION = 0, + CFU_INTERFACE_ID_W = 0, + CFU_FUNCTION_ID_W = 3, + CFU_REORDER_ID_W = 0, + CFU_REQ_RESP_ID_W = 0, + CFU_INPUTS = 2, + CFU_INPUT_DATA_W = 32, + CFU_OUTPUTS = 1, + CFU_OUTPUT_DATA_W = 32, + CFU_FLOW_REQ_READY_ALWAYS = false, + CFU_FLOW_RESP_READY_ALWAYS = false, + CFU_WITH_STATUS = true, + CFU_RAW_INSN_W = 32, + CFU_CFU_ID_W = 4, + CFU_STATE_INDEX_NUM = 5 + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveICache.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveICache.scala new file mode 100644 index 0000000..9cad30d --- /dev/null +++ 
b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveICache.scala @@ -0,0 +1,71 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} +import spinal.core._ +import vexriscv.ip.InstructionCacheConfig + +/** + * Created by spinalvm on 15.06.17. + */ +object GenSmallAndProductiveICache extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new PcManagerSimplePlugin( + resetVector = 0x80000000l, + relaxedPcCalculation = false + ), + new IBusCachedPlugin( + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = false, + catchAccessFault = false, + asyncTagMemory = false, + twoCycleRam = false, + twoCycleCache = true + ) + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveVfu.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveVfu.scala new file mode 100644 index 0000000..81ca61b --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallAndProductiveVfu.scala @@ -0,0 +1,64 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenSmallAndProductiveVfu extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new VfuPlugin( + stageCount = 2, + allowZeroLatency = false, + parameter = VfuParameter() + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallest.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallest.scala new file mode 100644 index 0000000..9813ccf --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallest.scala @@ -0,0 +1,59 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. + */ +object GenSmallest extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = false, + bypassMemory = false, + bypassWriteBack = false, + bypassWriteBackBuffer = false, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + SpinalVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenSmallestNoCsr.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallestNoCsr.scala new file mode 100644 index 0000000..cd1ee31 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenSmallestNoCsr.scala @@ -0,0 +1,64 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{plugin, VexRiscv, VexRiscvConfig} +import spinal.core._ + +/** + * Created by spinalvm on 15.06.17. 
+ */ +object GenSmallestNoCsr extends App{ + def cpu() = new VexRiscv( + config = VexRiscvConfig( + plugins = List( +// new PcManagerSimplePlugin( +// resetVector = 0x00000000l, +// relaxedPcCalculation = false +// ), + + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false, + earlyInjection = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false, + writeRfInMemoryStage = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = false, + bypassMemory = false, + bypassWriteBack = false, + bypassWriteBackBuffer = false, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + SpinalConfig(mergeAsyncProcess = false).generateVerilog(cpu()) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/GenTwoThreeStage.scala b/VexRiscv/src/main/scala/vexriscv/demo/GenTwoThreeStage.scala new file mode 100644 index 0000000..c3dd0db --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/GenTwoThreeStage.scala @@ -0,0 +1,72 @@ +package vexriscv.demo + +import spinal.core.SpinalVerilog +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} +import vexriscv.plugin.{BranchPlugin, CsrPlugin, CsrPluginConfig, DBusSimplePlugin, DecoderSimplePlugin, DivPlugin, FullBarrelShifterPlugin, HazardSimplePlugin, IBusSimplePlugin, IntAluPlugin, LightShifterPlugin, MulPlugin, MulSimplePlugin, NONE, RegFilePlugin, SrcPlugin, YamlPlugin} + +object GenTwoThreeStage extends App{ + def cpu(withMulDiv : Boolean, + bypass : Boolean, + barrielShifter : Boolean, + withMemoryStage : Boolean) = new VexRiscv( + config = VexRiscvConfig( + withMemoryStage = withMemoryStage, + withWriteBackStage = false, + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false, + injectorStage = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new CsrPlugin(CsrPluginConfig.smallest), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + readInExecute = true, + zeroBoot = true, + x0Init = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new HazardSimplePlugin( + bypassExecute = bypass, + bypassMemory = bypass, + bypassWriteBack = bypass, + bypassWriteBackBuffer = bypass, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = true, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) ++ (if(!withMulDiv) Nil else List( + new MulSimplePlugin, + new DivPlugin + )) ++ List(if(!barrielShifter) + new LightShifterPlugin + else + new FullBarrelShifterPlugin( + earlyInjection = true + ) + ) + ) + ) + + SpinalVerilog(cpu(true,true,true,true)) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/Linux.scala 
b/VexRiscv/src/main/scala/vexriscv/demo/Linux.scala new file mode 100644 index 0000000..8508a67 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/Linux.scala @@ -0,0 +1,514 @@ +/* + * SpinalHDL + * Copyright (c) Dolu, All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3.0 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library. + */ + +package vexriscv.demo + +import spinal.core._ +import spinal.lib.eda.bench.{AlteraStdTargets, Bench, Rtl, XilinxStdTargets} +import spinal.lib.eda.icestorm.IcestormStdTargets +import spinal.lib.master +import vexriscv._ +import vexriscv.ip._ +import vexriscv.plugin._ + +/* +prerequired stuff => +- JAVA JDK >= 8 +- SBT +- Verilator + +Setup things => +git clone https://github.com/SpinalHDL/SpinalHDL.git -b dev +git clone https://github.com/SpinalHDL/VexRiscv.git -b linux +cd VexRiscv + +Run regressions => +sbt "runMain vexriscv.demo.LinuxGen -r" +cd src/test/cpp/regression +make clean run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD DHRYSTONE=yes SUPERVISOR=yes MMU=yes CSR=yes DEBUG_PLUGIN=no COMPRESSED=no MUL=yes DIV=yes LRSC=yes AMO=yes REDO=10 TRACE=no COREMARK=yes LINUX_REGRESSION=yes + +Run linux in simulation (Require the machine mode emulator compiled in SIM mode) => +sbt "runMain vexriscv.demo.LinuxGen" +cd src/test/cpp/regression +export BUILDROOT=/home/miaou/pro/riscv/buildrootSpinal +make clean run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD SUPERVISOR=yes CSR=yes DEBUG_PLUGIN=no COMPRESSED=no LRSC=yes AMO=yes REDO=0 DHRYSTONE=no LINUX_SOC=yes EMULATOR=../../../main/c/emulator/build/emulator.bin VMLINUX=$BUILDROOT/output/images/Image DTB=$BUILDROOT/board/spinal/vexriscv_sim/rv32.dtb RAMDISK=$BUILDROOT/output/images/rootfs.cpio WITH_USER_IO=yes TRACE=no FLOW_INFO=no + +Run linux with QEMU (Require the machine mode emulator compiled in QEMU mode) +export BUILDROOT=/home/miaou/pro/riscv/buildrootSpinal +qemu-system-riscv32 -nographic -machine virt -m 1536M -device loader,file=src/main/c/emulator/build/emulator.bin,addr=0x80000000,cpu-num=0 -device loader,file=$BUILDROOT/board/spinal/vexriscv_sim/rv32.dtb,addr=0xC3000000 -device loader,file=$BUILDROOT/output/images/Image,addr=0xC0000000 -device loader,file=$BUILDROOT/output/images/rootfs.cpio,addr=0xc2000000 + + +Buildroot => +git clone https://github.com/SpinalHDL/buildroot.git -b vexriscv +cd buildroot +make spinal_vexriscv_sim_defconfig +make -j$(nproc) +output/host/bin/riscv32-linux-objcopy -O binary output/images/vmlinux output/images/Image + +After changing a kernel config into buildroot => +cd buildroot +make spinal_vexriscv_sim_defconfig +make linux-dirclean linux-rebuild -j8 +output/host/bin/riscv32-linux-objcopy -O binary output/images/vmlinux output/images/Image + +Compiling the machine mode emulator (check the config.h file to know the mode) => +cd src/main/c/emulator +make clean all + +Changing the emulator mode => +Edit the src/main/c/emulator/src/config.h file, and comment/uncomment the SIM/QEMU flags + +Other commands (Memo): +decompile file and 
split it +riscv64-unknown-elf-objdump -S -d vmlinux > vmlinux.asm; split -b 1M vmlinux.asm + +Kernel compilation command => +ARCH=riscv CROSS_COMPILE=riscv32-unknown-linux-gnu- make menuconfig +ARCH=riscv CROSS_COMPILE=riscv32-unknown-linux-gnu- make -j`nproc`; riscv32-unknown-linux-gnu-objcopy -O binary vmlinux vmlinux.bin + +Generate a DTB from a DTS => +dtc -O dtb -o rv32.dtb rv32.dts + +https://github.com/riscv/riscv-qemu/wiki#build-and-install + + +memo : +export DATA=/home/miaou/Downloads/Binaries-master +cd src/test/cpp/regression +rm VexRiscv.v +cp $DATA/VexRiscv.v ../../../.. +make run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD SUPERVISOR=yes CSR=yes COMPRESSED=no LRSC=yes AMO=yes REDO=0 DHRYSTONE=no LINUX_SOC=yes EMULATOR=$DATA/emulator.bin VMLINUX=$DATA/vmlinux.bin DTB=$DATA/rv32.dtb RAMDISK=$DATA/rootfs.cpio TRACE=no FLOW_INFO=no + +make clean run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD DHRYSTONE=no SUPERVISOR=yes CSR=yes COMPRESSED=no MUL=yes DIV=yes LRSC=yes AMO=yes MMU=yes REDO=1 TRACE=no LINUX_REGRESSION=yes + +qemu-system-riscv32 -nographic -machine virt -m 1536M -device loader,file=$DATA/emulator.bin,addr=0x80000000,cpu-num=0 -device loader,file=$DATA/rv32.dtb,addr=0xC3000000 -device loader,file=$DATA/vmlinux.bin,addr=0xC0000000 -device loader,file=$DATA/rootfs.cpio,addr=0xc2000000 + + +make run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD DHRYSTONE=yess SUPERVISOR=yes CSR=yes COMPRESSED=yes MUL=yes DIV=yes LRSC=yes AMO=yes REDO=1 TRACE=no LINUX_REGRESSION=yes + +program ../../../main/c/emulator/build/emulator.bin 0x80000000 verify + soc.loadBin(EMULATOR, 0x80000000); + soc.loadBin(VMLINUX, 0xC0000000); + soc.loadBin(DTB, 0xC3000000); + soc.loadBin(RAMDISK, 0xC2000000); + +export BUILDROOT=/home/miaou/pro/riscv/buildrootSpinal +make run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD SUPERVISOR=yes CSR=yes COMPRESSED=no LRSC=yes AMO=yes REDO=0 DHRYSTONE=no LINUX_SOC=yes +EMULATOR=../../../main/c/emulator/build/emulator.bin +VMLINUX=/home/miaou/pro/riscv/buildrootSpinal/output/images/Image +DTB=/home/miaou/pro/riscv/buildrootSpinal/board/spinal/vexriscv_sim/rv32.dtb +RAMDISK=/home/miaou/pro/riscv/buildrootSpinal/output/images/rootfs.cpio TRACE=no FLOW_INFO=no + +make run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD SUPERVISOR=yes CSR=yes COMPRESSED=no LRSC=yes AMO=yes REDO=0 DHRYSTONE=no LINUX_SOC=yes DEBUG_PLUGIN_EXTERNAL=yes + +rm -rf cpio +mkdir cpio +cd cpio +sudo cpio -i < ../rootfs.cpio +cd .. + +rm rootfs.cpio +cd cpio +sudo find | sudo cpio -H newc -o > ../rootfs.cpio +cd .. 
+ +make clean run IBUS=CACHED DBUS=CACHED DEBUG_PLUGIN=STD DHRYSTONE=yes SUPERVISOR=yes MMU=yes CSR=yes COMPRESSED=no MUL=yes DIV=yes LRSC=yes AMO=yes REDO=10 TRACE=no COREMARK=yes LINUX_REGRESSION=yes RUN_HEX=~/pro/riscv/zephyr/samples/synchronization/build/zephyr/zephyr.hex + + +*/ + + +object LinuxGen { + def configFull(litex : Boolean, withMmu : Boolean, withSmp : Boolean = false) = { + val config = VexRiscvConfig( + plugins = List( + //Uncomment the whole IBusSimplePlugin and comment IBusCachedPlugin if you want uncached iBus config +// new IBusSimplePlugin( +// resetVector = 0x80000000l, +// cmdForkOnSecondStage = false, +// cmdForkPersistence = false, +// prediction = DYNAMIC_TARGET, +// historyRamSizeLog2 = 10, +// catchAccessFault = true, +// compressedGen = true, +// busLatencyMin = 1, +// injectorStage = true, +// memoryTranslatorPortConfig = withMmu generate MmuPortConfig( +// portTlbSize = 4 +// ) +// ), + + //Uncomment the whole IBusCachedPlugin and comment IBusSimplePlugin if you want cached iBus config + new IBusCachedPlugin( + resetVector = 0x80000000l, + compressedGen = false, + prediction = STATIC, + injectorStage = false, + config = InstructionCacheConfig( + cacheSize = 4096*1, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = false, + twoCycleCache = true +// ) + ), + memoryTranslatorPortConfig = withMmu generate MmuPortConfig( + portTlbSize = 4 + ) + ), + // ).newTightlyCoupledPort(TightlyCoupledPortParameter("iBusTc", a => a(30 downto 28) === 0x0 && a(5))), +// new DBusSimplePlugin( +// catchAddressMisaligned = true, +// catchAccessFault = true, +// earlyInjection = false, +// withLrSc = true, +// memoryTranslatorPortConfig = withMmu generate MmuPortConfig( +// portTlbSize = 4 +// ) +// ), + new DBusCachedPlugin( + dBusCmdMasterPipe = true, + dBusCmdSlavePipe = true, + dBusRspSlavePipe = true, + config = new DataCacheConfig( + cacheSize = 4096*1, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true, + withExclusive = withSmp, + withInvalidate = withSmp, + withLrSc = true, + withAmo = true +// ) + ), + memoryTranslatorPortConfig = withMmu generate MmuPortConfig( + portTlbSize = 4 + ) + ), + + // new MemoryTranslatorPlugin( + // tlbSize = 32, + // virtualRange = _(31 downto 28) === 0xC, + // ioRange = _(31 downto 28) === 0xF + // ), + + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = true + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false + ), + new FullBarrelShifterPlugin(earlyInjection = false), + // new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + // new HazardSimplePlugin(false, true, false, true), + // new HazardSimplePlugin(false, false, false, false), + new MulPlugin, + new MulDivIterativePlugin( + genMul = false, + genDiv = true, + mulUnrollFactor = 32, + divUnrollFactor = 1 + ), + // new DivPlugin, + new CsrPlugin(CsrPluginConfig.linuxMinimal(0x80000020l).copy(ebreakGen = false)), + // new CsrPlugin(//CsrPluginConfig.all2(0x80000020l).copy(ebreakGen = true)/* + // CsrPluginConfig( + // 
catchIllegalAccess = false, + // mvendorid = null, + // marchid = null, + // mimpid = null, + // mhartid = null, + // misaExtensionsInit = 0, + // misaAccess = CsrAccess.READ_ONLY, + // mtvecAccess = CsrAccess.WRITE_ONLY, + // mtvecInit = 0x80000020l, + // mepcAccess = CsrAccess.READ_WRITE, + // mscratchGen = true, + // mcauseAccess = CsrAccess.READ_ONLY, + // mbadaddrAccess = CsrAccess.READ_ONLY, + // mcycleAccess = CsrAccess.NONE, + // minstretAccess = CsrAccess.NONE, + // ecallGen = true, + // ebreakGen = true, + // wfiGenAsWait = false, + // wfiGenAsNop = true, + // ucycleAccess = CsrAccess.NONE + // )), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true, + fenceiGenAsAJump = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + if(withMmu) config.plugins += new MmuPlugin( + ioRange = (x => if(litex) x(31 downto 28) === 0xB || x(31 downto 28) === 0xE || x(31 downto 28) === 0xF else x(31 downto 28) === 0xF) + ) else { + config.plugins += new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ) + } + config + } + + + + def main(args: Array[String]) { +// import spinal.core.sim._ +// SimConfig.withConfig(SpinalConfig(mergeAsyncProcess = false, anonymSignalPrefix = "zz_")).allOptimisation.compile(new VexRiscv(configFull)).doSimUntilVoid{ dut => +// dut.clockDomain.forkStimulus(10) +// dut.clockDomain.forkSimSpeedPrinter(4) +// var iBus : InstructionCacheMemBus = null +// +// dut.plugins.foreach{ +// case plugin: IBusCachedPlugin => iBus = plugin.iBus +// case _ => +// } +// dut.clockDomain.onSamplings{ +//// iBus.cmd.ready.randomize() +// iBus.rsp.data #= 0x13 +// } +// } + + SpinalConfig(mergeAsyncProcess = false, anonymSignalPrefix = "_zz").generateVerilog { + + + val toplevel = new VexRiscv(configFull( + litex = !args.contains("-r"), + withMmu = true + )) +// val toplevel = new VexRiscv(configLight) +// val toplevel = new VexRiscv(configTest) + + /*toplevel.rework { + var iBus : AvalonMM = null + for (plugin <- toplevel.config.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.asDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: IBusCachedPlugin => { + plugin.iBus.asDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: DBusSimplePlugin => { + plugin.dBus.asDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DBusCachedPlugin => { + plugin.dBus.asDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DebugPlugin => { + plugin.io.bus.asDirectionLess() + slave(plugin.io.bus.fromAvalon()) + .setName("debugBusAvalon") + .addTag(ClockDomainTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + plugin.io.resetOut + .addTag(ResetEmitterTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + } + case _ => + } + for (plugin <- toplevel.config.plugins) plugin match { + case plugin: CsrPlugin => { + plugin.externalInterrupt + 
.addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + plugin.timerInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + } + case _ => + } + }*/ +// toplevel.writeBack.input(config.PC).addAttribute(Verilator.public) +// toplevel.service(classOf[DecoderSimplePlugin]).bench(toplevel) + // toplevel.children.find(_.isInstanceOf[DataCache]).get.asInstanceOf[DataCache].io.cpu.execute.addAttribute(Verilator.public) + + +// toplevel.rework { +// for (plugin <- toplevel.config.plugins) plugin match { +// case plugin: IBusSimplePlugin => { +// plugin.iBus.setAsDirectionLess().unsetName() //Unset IO properties of iBus +// val iBus = master(IBusSimpleBus()).setName("iBus") +// +// iBus.cmd << plugin.iBus.cmd.halfPipe() +// iBus.rsp.stage >> plugin.iBus.rsp +// } +// case plugin: DBusSimplePlugin => { +// plugin.dBus.setAsDirectionLess().unsetName() +// val dBus = master(DBusSimpleBus()).setName("dBus") +// val pending = RegInit(False) setWhen(plugin.dBus.cmd.fire) clearWhen(plugin.dBus.rsp.ready) +// dBus.cmd << plugin.dBus.cmd.haltWhen(pending).halfPipe() +// plugin.dBus.rsp := RegNext(dBus.rsp) +// plugin.dBus.rsp.ready clearWhen(!pending) +// } +// +// case _ => +// } +// } + + toplevel + } + } +} + +object LinuxSyntesisBench extends App{ + val withoutMmu = new Rtl { + override def getName(): String = "VexRiscv Without Mmu" + override def getRtlPath(): String = "VexRiscvWithoutMmu.v" + SpinalConfig(inlineRom=true).generateVerilog(new VexRiscv(LinuxGen.configFull(litex = false, withMmu = false)).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val withMmu = new Rtl { + override def getName(): String = "VexRiscv With Mmu" + override def getRtlPath(): String = "VexRiscvWithMmu.v" + SpinalConfig(inlineRom=true).generateVerilog(new VexRiscv(LinuxGen.configFull(litex = false, withMmu = true)).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val rtls = List(withoutMmu,withMmu) + // val rtls = List(smallestNoCsr, smallest, smallAndProductive, smallAndProductiveWithICache) + // val rtls = List(smallAndProductive, smallAndProductiveWithICache, fullNoMmuMaxPerf, fullNoMmu, full) + // val rtls = List(fullNoMmu) + + val targets = XilinxStdTargets( + vivadoArtix7Path = "/media/miaou/HD/linux/Xilinx/Vivado/2018.3/bin" + ) ++ AlteraStdTargets( + quartusCycloneIVPath = "/media/miaou/HD/linux/intelFPGA_lite/18.1/quartus/bin", + quartusCycloneVPath = "/media/miaou/HD/linux/intelFPGA_lite/18.1/quartus/bin" + ) //++ IcestormStdTargets().take(1) + + Bench(rtls, targets, "/media/miaou/HD/linux/tmp") +} + +object LinuxSim extends App{ + import spinal.core.sim._ + + SimConfig.allOptimisation.compile(new VexRiscv(LinuxGen.configFull(litex = false, withMmu = true))).doSim{dut => +// dut.clockDomain.forkStimulus(10) +// dut.clockDomain.forkSimSpeedPrinter() +// dut.plugins.foreach{ +// case p : IBusSimplePlugin => dut.clockDomain.onRisingEdges{ +// p.iBus.cmd.ready #= ! p.iBus.cmd.ready.toBoolean +//// p.iBus.rsp.valid.randomize() +//// p.iBus.rsp.inst.randomize() +//// p.iBus.rsp.error.randomize() +// } +// case p : DBusSimplePlugin => dut.clockDomain.onRisingEdges{ +// p.dBus.cmd.ready #= ! 
p.dBus.cmd.ready.toBoolean +//// p.dBus.cmd.ready.randomize() +//// p.dBus.rsp.ready.randomize() +//// p.dBus.rsp.data.randomize() +//// p.dBus.rsp.error.randomize() +// } +// case _ => +// } +// sleep(10*10000000) + + + var cycleCounter = 0l + var lastTime = System.nanoTime() + + + + + var iBus : IBusSimpleBus = null + var dBus : DBusSimpleBus = null + dut.plugins.foreach{ + case p : IBusSimplePlugin => + iBus = p.iBus +// p.iBus.rsp.valid.randomize() +// p.iBus.rsp.inst.randomize() +// p.iBus.rsp.error.randomize() + case p : DBusSimplePlugin => + dBus = p.dBus +// p.dBus.cmd.ready.randomize() +// p.dBus.rsp.ready.randomize() +// p.dBus.rsp.data.randomize() +// p.dBus.rsp.error.randomize() + case _ => + } + + dut.clockDomain.resetSim #= false + dut.clockDomain.clockSim #= false + sleep(1) + dut.clockDomain.resetSim #= true + sleep(1) + + def f(): Unit ={ + cycleCounter += 1 + + if((cycleCounter & 8191) == 0){ + val currentTime = System.nanoTime() + val deltaTime = (currentTime - lastTime)*1e-9 + if(deltaTime > 2.0) { + println(f"[Info] Simulation speed : ${cycleCounter / deltaTime * 1e-3}%4.0f kcycles/s") + lastTime = currentTime + cycleCounter = 0 + } + } + dut.clockDomain.clockSim #= false + iBus.cmd.ready #= ! iBus.cmd.ready.toBoolean + dBus.cmd.ready #= ! dBus.cmd.ready.toBoolean + delayed(1)(f2) + } + def f2(): Unit ={ + dut.clockDomain.clockSim #= true + delayed(1)(f) + } + + delayed(1)(f) + + sleep(100000000) + } +}
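A note on using the generator above: LinuxGen.configFull(litex, withMmu) is an ordinary Scala factory, so the same configuration can be elaborated from any other object. A minimal sketch, assuming only what Linux.scala already provides (the object name LinuxGenNoMmu is illustrative and not part of the repository); with withMmu = false the factory appends a StaticMemoryTranslatorPlugin instead of the MmuPlugin:

package vexriscv.demo

import spinal.core._
import vexriscv.VexRiscv

object LinuxGenNoMmu extends App {
  // Same SpinalConfig options as LinuxGen.main, reusing the configuration factory without the MMU.
  SpinalConfig(mergeAsyncProcess = false, anonymSignalPrefix = "_zz").generateVerilog(
    new VexRiscv(LinuxGen.configFull(litex = false, withMmu = false))
  )
}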
\ No newline at end of file diff --git a/VexRiscv/src/main/scala/vexriscv/demo/Murax.scala b/VexRiscv/src/main/scala/vexriscv/demo/Murax.scala new file mode 100644 index 0000000..dbff45b --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/Murax.scala @@ -0,0 +1,589 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.amba3.apb._ +import spinal.lib.bus.misc.SizeMapping +import spinal.lib.bus.simple.PipelinedMemoryBus +import spinal.lib.com.jtag.Jtag +import spinal.lib.com.spi.ddr.SpiXdrMaster +import spinal.lib.com.uart._ +import spinal.lib.io.{InOutWrapper, TriStateArray} +import spinal.lib.misc.{InterruptCtrl, Prescaler, Timer} +import spinal.lib.soc.pinsec.{PinsecTimerCtrl, PinsecTimerCtrlExternal} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} +import spinal.lib.com.spi.ddr._ +import spinal.lib.bus.simple._ +import scala.collection.mutable.ArrayBuffer +import scala.collection.Seq + +/** + * Created by PIC32F_USER on 28/07/2017. + * + * Murax is a very light SoC which could work without any external component. + * - ICE40-hx8k + icestorm => 53 Mhz, 2142 LC + * - 0.37 DMIPS/Mhz + * - 8 kB of on-chip ram + * - JTAG debugger (eclipse/GDB/openocd ready) + * - Interrupt support + * - APB bus for peripherals + * - 32 GPIO pin + * - one 16 bits prescaler, two 16 bits timers + * - one UART with tx/rx fifo + */ + + +case class MuraxConfig(coreFrequency : HertzNumber, + onChipRamSize : BigInt, + onChipRamHexFile : String, + pipelineDBus : Boolean, + pipelineMainBus : Boolean, + pipelineApbBridge : Boolean, + gpioWidth : Int, + uartCtrlConfig : UartCtrlMemoryMappedConfig, + xipConfig : SpiXdrMasterCtrl.MemoryMappingParameters, + hardwareBreakpointCount : Int, + cpuPlugins : ArrayBuffer[Plugin[VexRiscv]]){ + require(pipelineApbBridge || pipelineMainBus, "At least pipelineMainBus or pipelineApbBridge should be enable to avoid wipe transactions") + val genXip = xipConfig != null + +} + + + +object MuraxConfig{ + def default : MuraxConfig = default(false, false) + def default(withXip : Boolean = false, bigEndian : Boolean = false) = MuraxConfig( + coreFrequency = 12 MHz, + onChipRamSize = 8 kB, + onChipRamHexFile = null, + pipelineDBus = true, + pipelineMainBus = false, + pipelineApbBridge = true, + gpioWidth = 32, + xipConfig = ifGen(withXip) (SpiXdrMasterCtrl.MemoryMappingParameters( + SpiXdrMasterCtrl.Parameters(8, 12, SpiXdrParameter(2, 2, 1)).addFullDuplex(0,1,false), + cmdFifoDepth = 32, + rspFifoDepth = 32, + xip = SpiXdrMasterCtrl.XipBusParameters(addressWidth = 24, lengthWidth = 2) + )), + hardwareBreakpointCount = if(withXip) 3 else 0, + cpuPlugins = ArrayBuffer( //DebugPlugin added by the toplevel + new IBusSimplePlugin( + resetVector = if(withXip) 0xF001E000l else 0x80000000l, + cmdForkOnSecondStage = true, + cmdForkPersistence = withXip, //Required by the Xip controller + prediction = NONE, + catchAccessFault = false, + compressedGen = false, + bigEndian = bigEndian + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false, + earlyInjection = false, + bigEndian = bigEndian + ), + new CsrPlugin(CsrPluginConfig.smallest(mtvecInit = if(withXip) 0xE0040020l else 0x80000020l)), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = false + ), + new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = 
false, + bypassMemory = false, + bypassWriteBack = false, + bypassWriteBackBuffer = false, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ), + uartCtrlConfig = UartCtrlMemoryMappedConfig( + uartCtrlConfig = UartCtrlGenerics( + dataWidthMax = 8, + clockDividerWidth = 20, + preSamplingSize = 1, + samplingSize = 3, + postSamplingSize = 1 + ), + initConfig = UartCtrlInitConfig( + baudrate = 115200, + dataLength = 7, //7 => 8 bits + parity = UartParityType.NONE, + stop = UartStopType.ONE + ), + busCanWriteClockDividerConfig = false, + busCanWriteFrameConfig = false, + txFifoDepth = 16, + rxFifoDepth = 16 + ) + + ) + + def fast = { + val config = default + + //Replace HazardSimplePlugin to get datapath bypass + config.cpuPlugins(config.cpuPlugins.indexWhere(_.isInstanceOf[HazardSimplePlugin])) = new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true + ) +// config.cpuPlugins(config.cpuPlugins.indexWhere(_.isInstanceOf[LightShifterPlugin])) = new FullBarrelShifterPlugin() + + config + } +} + + +case class Murax(config : MuraxConfig) extends Component{ + import config._ + + val io = new Bundle { + //Clocks / reset + val asyncReset = in Bool() + val mainClk = in Bool() + + //Main components IO + val jtag = slave(Jtag()) + + //Peripherals IO + val gpioA = master(TriStateArray(gpioWidth bits)) + val uart = master(Uart()) + + val xip = ifGen(genXip)(master(SpiXdrMaster(xipConfig.ctrl.spi))) + } + + + val resetCtrlClockDomain = ClockDomain( + clock = io.mainClk, + config = ClockDomainConfig( + resetKind = BOOT + ) + ) + + val resetCtrl = new ClockingArea(resetCtrlClockDomain) { + val mainClkResetUnbuffered = False + + //Implement an counter to keep the reset axiResetOrder high 64 cycles + // Also this counter will automatically do a reset when the system boot. + val systemClkResetCounter = Reg(UInt(6 bits)) init(0) + when(systemClkResetCounter =/= U(systemClkResetCounter.range -> true)){ + systemClkResetCounter := systemClkResetCounter + 1 + mainClkResetUnbuffered := True + } + when(BufferCC(io.asyncReset)){ + systemClkResetCounter := 0 + } + + //Create all reset used later in the design + val mainClkReset = RegNext(mainClkResetUnbuffered) + val systemReset = RegNext(mainClkResetUnbuffered) + } + + + val systemClockDomain = ClockDomain( + clock = io.mainClk, + reset = resetCtrl.systemReset, + frequency = FixedFrequency(coreFrequency) + ) + + val debugClockDomain = ClockDomain( + clock = io.mainClk, + reset = resetCtrl.mainClkReset, + frequency = FixedFrequency(coreFrequency) + ) + + val system = new ClockingArea(systemClockDomain) { + val pipelinedMemoryBusConfig = PipelinedMemoryBusConfig( + addressWidth = 32, + dataWidth = 32 + ) + + val bigEndianDBus = config.cpuPlugins.exists(_ match{ case plugin : DBusSimplePlugin => plugin.bigEndian case _ => false}) + + //Arbiter of the cpu dBus/iBus to drive the mainBus + //Priority to dBus, !! cmd transactions can change on the fly !! 
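    // (MuraxMasterArbiter is defined in MuraxUtiles.scala: dBus accesses win over iBus ones, so the
    //  command already presented on the shared bus can change while it waits for ready, hence the
    //  warning above; read responses are steered back to the right master by its rspTarget register.)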
+ val mainBusArbiter = new MuraxMasterArbiter(pipelinedMemoryBusConfig, bigEndianDBus) + + //Instanciate the CPU + val cpu = new VexRiscv( + config = VexRiscvConfig( + plugins = cpuPlugins += new DebugPlugin(debugClockDomain, hardwareBreakpointCount) + ) + ) + + //Checkout plugins used to instanciate the CPU to connect them to the SoC + val timerInterrupt = False + val externalInterrupt = False + for(plugin <- cpu.plugins) plugin match{ + case plugin : IBusSimplePlugin => + mainBusArbiter.io.iBus.cmd <> plugin.iBus.cmd + mainBusArbiter.io.iBus.rsp <> plugin.iBus.rsp + case plugin : DBusSimplePlugin => { + if(!pipelineDBus) + mainBusArbiter.io.dBus <> plugin.dBus + else { + mainBusArbiter.io.dBus.cmd << plugin.dBus.cmd.halfPipe() + mainBusArbiter.io.dBus.rsp <> plugin.dBus.rsp + } + } + case plugin : CsrPlugin => { + plugin.externalInterrupt := externalInterrupt + plugin.timerInterrupt := timerInterrupt + } + case plugin : DebugPlugin => plugin.debugClockDomain{ + resetCtrl.systemReset setWhen(RegNext(plugin.io.resetOut)) + io.jtag <> plugin.io.bus.fromJtag() + } + case _ => + } + + + + //****** MainBus slaves ******** + val mainBusMapping = ArrayBuffer[(PipelinedMemoryBus,SizeMapping)]() + val ram = new MuraxPipelinedMemoryBusRam( + onChipRamSize = onChipRamSize, + onChipRamHexFile = onChipRamHexFile, + pipelinedMemoryBusConfig = pipelinedMemoryBusConfig, + bigEndian = bigEndianDBus + ) + mainBusMapping += ram.io.bus -> (0x80000000l, onChipRamSize) + + val apbBridge = new PipelinedMemoryBusToApbBridge( + apb3Config = Apb3Config( + addressWidth = 20, + dataWidth = 32 + ), + pipelineBridge = pipelineApbBridge, + pipelinedMemoryBusConfig = pipelinedMemoryBusConfig + ) + mainBusMapping += apbBridge.io.pipelinedMemoryBus -> (0xF0000000l, 1 MB) + + + + //******** APB peripherals ********* + val apbMapping = ArrayBuffer[(Apb3, SizeMapping)]() + val gpioACtrl = Apb3Gpio(gpioWidth = gpioWidth, withReadSync = true) + io.gpioA <> gpioACtrl.io.gpio + apbMapping += gpioACtrl.io.apb -> (0x00000, 4 kB) + + val uartCtrl = Apb3UartCtrl(uartCtrlConfig) + uartCtrl.io.uart <> io.uart + externalInterrupt setWhen(uartCtrl.io.interrupt) + apbMapping += uartCtrl.io.apb -> (0x10000, 4 kB) + + val timer = new MuraxApb3Timer() + timerInterrupt setWhen(timer.io.interrupt) + apbMapping += timer.io.apb -> (0x20000, 4 kB) + + val xip = ifGen(genXip)(new Area{ + val ctrl = Apb3SpiXdrMasterCtrl(xipConfig) + ctrl.io.spi <> io.xip + externalInterrupt setWhen(ctrl.io.interrupt) + apbMapping += ctrl.io.apb -> (0x1F000, 4 kB) + + val accessBus = new PipelinedMemoryBus(PipelinedMemoryBusConfig(24,32)) + mainBusMapping += accessBus -> (0xE0000000l, 16 MB) + + ctrl.io.xip.fromPipelinedMemoryBus() << accessBus + val bootloader = Apb3Rom("src/main/c/murax/xipBootloader/crt.bin") + apbMapping += bootloader.io.apb -> (0x1E000, 4 kB) + }) + + + + //******** Memory mappings ********* + val apbDecoder = Apb3Decoder( + master = apbBridge.io.apb, + slaves = apbMapping.toSeq + ) + + val mainBusDecoder = new Area { + val logic = new MuraxPipelinedMemoryBusDecoder( + master = mainBusArbiter.io.masterBus, + specification = mainBusMapping.toSeq, + pipelineMaster = pipelineMainBus + ) + } + } +} + + + +object Murax{ + def main(args: Array[String]) { + SpinalVerilog(Murax(MuraxConfig.default)) + } +} + +object MuraxCfu{ + def main(args: Array[String]) { + SpinalVerilog{ + val config = MuraxConfig.default + config.cpuPlugins += new CfuPlugin( + stageCount = 1, + allowZeroLatency = true, + encodings = List( + CfuPluginEncoding ( + instruction = 
M"-------------------------0001011", + functionId = List(14 downto 12), + input2Kind = CfuPlugin.Input2Kind.RS + ) + ), + busParameter = CfuBusParameter( + CFU_VERSION = 0, + CFU_INTERFACE_ID_W = 0, + CFU_FUNCTION_ID_W = 3, + CFU_REORDER_ID_W = 0, + CFU_REQ_RESP_ID_W = 0, + CFU_INPUTS = 2, + CFU_INPUT_DATA_W = 32, + CFU_OUTPUTS = 1, + CFU_OUTPUT_DATA_W = 32, + CFU_FLOW_REQ_READY_ALWAYS = false, + CFU_FLOW_RESP_READY_ALWAYS = false, + CFU_WITH_STATUS = true, + CFU_RAW_INSN_W = 32, + CFU_CFU_ID_W = 4, + CFU_STATE_INDEX_NUM = 5 + ) + ) + + val toplevel = Murax(config) + + toplevel.rework { + for (plugin <- toplevel.system.cpu.plugins) plugin match { + case plugin: CfuPlugin => plugin.bus.toIo().setName("miaou") + case _ => + } + } + + toplevel + } + } +} + + +object Murax_iCE40_hx8k_breakout_board_xip{ + + case class SB_GB() extends BlackBox{ + val USER_SIGNAL_TO_GLOBAL_BUFFER = in Bool() + val GLOBAL_BUFFER_OUTPUT = out Bool() + } + + case class SB_IO_SCLK() extends BlackBox{ + addGeneric("PIN_TYPE", B"010000") + val PACKAGE_PIN = out Bool() + val OUTPUT_CLK = in Bool() + val CLOCK_ENABLE = in Bool() + val D_OUT_0 = in Bool() + val D_OUT_1 = in Bool() + setDefinitionName("SB_IO") + } + + case class SB_IO_DATA() extends BlackBox{ + addGeneric("PIN_TYPE", B"110000") + val PACKAGE_PIN = inout(Analog(Bool)) + val CLOCK_ENABLE = in Bool() + val INPUT_CLK = in Bool() + val OUTPUT_CLK = in Bool() + val OUTPUT_ENABLE = in Bool() + val D_OUT_0 = in Bool() + val D_OUT_1 = in Bool() + val D_IN_0 = out Bool() + val D_IN_1 = out Bool() + setDefinitionName("SB_IO") + } + + case class Murax_iCE40_hx8k_breakout_board_xip() extends Component{ + val io = new Bundle { + val mainClk = in Bool() + val jtag_tck = in Bool() + val jtag_tdi = in Bool() + val jtag_tdo = out Bool() + val jtag_tms = in Bool() + val uart_txd = out Bool() + val uart_rxd = in Bool() + + val mosi = inout(Analog(Bool)) + val miso = inout(Analog(Bool)) + val sclk = out Bool() + val spis = out Bool() + + val led = out Bits(8 bits) + } + val murax = Murax(MuraxConfig.default(withXip = true).copy(onChipRamSize = 8 kB)) + murax.io.asyncReset := False + + val mainClkBuffer = SB_GB() + mainClkBuffer.USER_SIGNAL_TO_GLOBAL_BUFFER <> io.mainClk + mainClkBuffer.GLOBAL_BUFFER_OUTPUT <> murax.io.mainClk + + val jtagClkBuffer = SB_GB() + jtagClkBuffer.USER_SIGNAL_TO_GLOBAL_BUFFER <> io.jtag_tck + jtagClkBuffer.GLOBAL_BUFFER_OUTPUT <> murax.io.jtag.tck + + io.led <> murax.io.gpioA.write(7 downto 0) + + murax.io.jtag.tdi <> io.jtag_tdi + murax.io.jtag.tdo <> io.jtag_tdo + murax.io.jtag.tms <> io.jtag_tms + murax.io.gpioA.read <> 0 + murax.io.uart.txd <> io.uart_txd + murax.io.uart.rxd <> io.uart_rxd + + + + val xip = new ClockingArea(murax.systemClockDomain) { + RegNext(murax.io.xip.ss.asBool) <> io.spis + + val sclkIo = SB_IO_SCLK() + sclkIo.PACKAGE_PIN <> io.sclk + sclkIo.CLOCK_ENABLE := True + + sclkIo.OUTPUT_CLK := ClockDomain.current.readClockWire + sclkIo.D_OUT_0 <> murax.io.xip.sclk.write(0) + sclkIo.D_OUT_1 <> RegNext(murax.io.xip.sclk.write(1)) + + val datas = for ((data, pin) <- (murax.io.xip.data, List(io.mosi, io.miso)).zipped) yield new Area { + val dataIo = SB_IO_DATA() + dataIo.PACKAGE_PIN := pin + dataIo.CLOCK_ENABLE := True + + dataIo.OUTPUT_CLK := ClockDomain.current.readClockWire + dataIo.OUTPUT_ENABLE <> data.writeEnable + dataIo.D_OUT_0 <> data.write(0) + dataIo.D_OUT_1 <> RegNext(data.write(1)) + + dataIo.INPUT_CLK := ClockDomain.current.readClockWire + data.read(0) := dataIo.D_IN_0 + data.read(1) := RegNext(dataIo.D_IN_1) + } + } + + 
} + + def main(args: Array[String]) { + SpinalVerilog(Murax_iCE40_hx8k_breakout_board_xip()) + } +} + +object MuraxDhrystoneReady{ + def main(args: Array[String]) { + SpinalVerilog(Murax(MuraxConfig.fast.copy(onChipRamSize = 256 kB))) + } +} + +object MuraxDhrystoneReadyMulDivStatic{ + def main(args: Array[String]) { + SpinalVerilog({ + val config = MuraxConfig.fast.copy(onChipRamSize = 256 kB) + config.cpuPlugins += new MulPlugin + config.cpuPlugins += new DivPlugin + config.cpuPlugins.remove(config.cpuPlugins.indexWhere(_.isInstanceOf[BranchPlugin])) + config.cpuPlugins +=new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ) + config.cpuPlugins += new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = true, + cmdForkPersistence = false, + prediction = STATIC, + catchAccessFault = false, + compressedGen = false + ) + config.cpuPlugins.remove(config.cpuPlugins.indexWhere(_.isInstanceOf[LightShifterPlugin])) + config.cpuPlugins += new FullBarrelShifterPlugin + Murax(config) + }) + } +} + +//Will blink led and echo UART RX to UART TX (in the verilator sim, type some text and press enter to send UART frame to the Murax RX pin) +object MuraxWithRamInit{ + def main(args: Array[String]) { + SpinalVhdl(Murax(MuraxConfig.default.copy(onChipRamSize = 4 kB, onChipRamHexFile = "src/main/ressource/hex/muraxDemo.hex"))) + } +} + +object Murax_arty{ + def main(args: Array[String]) { + val hex = "src/main/c/murax/hello_world/build/hello_world.hex" + SpinalVerilog(Murax(MuraxConfig.default(false).copy(coreFrequency = 100 MHz,onChipRamSize = 32 kB, onChipRamHexFile = hex))) + } +} + + +object MuraxAsicBlackBox extends App{ + println("Warning this soc do not has any rom to boot on.") + val config = SpinalConfig() + config.addStandardMemBlackboxing(blackboxAll) + config.generateVerilog(Murax(MuraxConfig.default())) +} + + +object de1_murax_franz{ + + case class de1_murax_franz() extends Component{ + val io = new Bundle { + val jtag_tck = in Bool() + val jtag_tdi = in Bool() + val jtag_tdo = out Bool() + val jtag_tms = in Bool() + val uart_txd = out Bool() + val uart_rxd = in Bool() + + val KEY0 = in Bool() + val CLOCK_50 = in Bool() + + val LEDR = out Bits(8 bits) + } + noIoPrefix() + + val murax = Murax(MuraxConfig.default.copy( + coreFrequency = 50 MHz, + onChipRamSize = 4 kB, + onChipRamHexFile = "src/main/ressource/hex/muraxDemo.hex")) + + io.LEDR <> murax.io.gpioA.write(7 downto 0) + + murax.io.jtag.tck <> io.jtag_tck + murax.io.jtag.tdi <> io.jtag_tdi + murax.io.jtag.tdo <> io.jtag_tdo + murax.io.jtag.tms <> io.jtag_tms + murax.io.gpioA.read <> 0 + murax.io.uart.txd <> io.uart_txd + murax.io.uart.rxd <> io.uart_rxd + murax.io.asyncReset <> ! 
io.KEY0 + murax.io.mainClk <> io.CLOCK_50 + + } + + def main(args: Array[String]) { + SpinalVhdl(de1_murax_franz()) + } +} + + + + diff --git a/VexRiscv/src/main/scala/vexriscv/demo/MuraxUtiles.scala b/VexRiscv/src/main/scala/vexriscv/demo/MuraxUtiles.scala new file mode 100644 index 0000000..22bc438 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/MuraxUtiles.scala @@ -0,0 +1,174 @@ +package vexriscv.demo + +import java.nio.{ByteBuffer, ByteOrder} + +import spinal.core._ +import spinal.lib.bus.amba3.apb.{Apb3, Apb3Config, Apb3SlaveFactory} +import spinal.lib.bus.misc.SizeMapping +import spinal.lib.misc.{HexTools, InterruptCtrl, Prescaler, Timer} +import spinal.lib._ +import spinal.lib.bus.simple._ +import vexriscv.plugin.{DBusSimpleBus, IBusSimpleBus} + +class MuraxMasterArbiter(pipelinedMemoryBusConfig : PipelinedMemoryBusConfig, bigEndian : Boolean = false) extends Component{ + val io = new Bundle{ + val iBus = slave(IBusSimpleBus(null)) + val dBus = slave(DBusSimpleBus(bigEndian)) + val masterBus = master(PipelinedMemoryBus(pipelinedMemoryBusConfig)) + } + + io.masterBus.cmd.valid := io.iBus.cmd.valid || io.dBus.cmd.valid + io.masterBus.cmd.write := io.dBus.cmd.valid && io.dBus.cmd.wr + io.masterBus.cmd.address := io.dBus.cmd.valid ? io.dBus.cmd.address | io.iBus.cmd.pc + io.masterBus.cmd.data := io.dBus.cmd.data + io.masterBus.cmd.mask := io.dBus.genMask(io.dBus.cmd) + io.iBus.cmd.ready := io.masterBus.cmd.ready && !io.dBus.cmd.valid + io.dBus.cmd.ready := io.masterBus.cmd.ready + + + val rspPending = RegInit(False) clearWhen(io.masterBus.rsp.valid) + val rspTarget = RegInit(False) + when(io.masterBus.cmd.fire && !io.masterBus.cmd.write){ + rspTarget := io.dBus.cmd.valid + rspPending := True + } + + when(rspPending && !io.masterBus.rsp.valid){ + io.iBus.cmd.ready := False + io.dBus.cmd.ready := False + io.masterBus.cmd.valid := False + } + + io.iBus.rsp.valid := io.masterBus.rsp.valid && !rspTarget + io.iBus.rsp.inst := io.masterBus.rsp.data + io.iBus.rsp.error := False + + io.dBus.rsp.ready := io.masterBus.rsp.valid && rspTarget + io.dBus.rsp.data := io.masterBus.rsp.data + io.dBus.rsp.error := False +} + + +case class MuraxPipelinedMemoryBusRam(onChipRamSize : BigInt, onChipRamHexFile : String, pipelinedMemoryBusConfig : PipelinedMemoryBusConfig, bigEndian : Boolean = false) extends Component{ + val io = new Bundle{ + val bus = slave(PipelinedMemoryBus(pipelinedMemoryBusConfig)) + } + + val ram = Mem(Bits(32 bits), onChipRamSize / 4) + io.bus.rsp.valid := RegNext(io.bus.cmd.fire && !io.bus.cmd.write) init(False) + io.bus.rsp.data := ram.readWriteSync( + address = (io.bus.cmd.address >> 2).resized, + data = io.bus.cmd.data, + enable = io.bus.cmd.valid, + write = io.bus.cmd.write, + mask = io.bus.cmd.mask + ) + io.bus.cmd.ready := True + + if(onChipRamHexFile != null){ + HexTools.initRam(ram, onChipRamHexFile, 0x80000000l) + if(bigEndian) + // HexTools.initRam (incorrectly) assumes little endian byte ordering + for((word, wordIndex) <- ram.initialContent.zipWithIndex) + ram.initialContent(wordIndex) = + ((word & 0xffl) << 24) | + ((word & 0xff00l) << 8) | + ((word & 0xff0000l) >> 8) | + ((word & 0xff000000l) >> 24) + } +} + + + +case class Apb3Rom(onChipRamBinFile : String) extends Component{ + import java.nio.file.{Files, Paths} + val byteArray = Files.readAllBytes(Paths.get(onChipRamBinFile)) + val wordCount = (byteArray.length+3)/4 + val buffer = ByteBuffer.wrap(Files.readAllBytes(Paths.get(onChipRamBinFile))).order(ByteOrder.LITTLE_ENDIAN); + val wordArray = (0 until 
wordCount).map(i => { + val v = buffer.getInt + if(v < 0) BigInt(v.toLong & 0xFFFFFFFFl) else BigInt(v) + }) + + val io = new Bundle{ + val apb = slave(Apb3(log2Up(wordCount*4),32)) + } + + val rom = Mem(Bits(32 bits), wordCount) initBigInt(wordArray) +// io.apb.PRDATA := rom.readSync(io.apb.PADDR >> 2) + io.apb.PRDATA := rom.readAsync(RegNext(io.apb.PADDR >> 2)) + io.apb.PREADY := True +} + + + +class MuraxPipelinedMemoryBusDecoder(master : PipelinedMemoryBus, val specification : Seq[(PipelinedMemoryBus,SizeMapping)], pipelineMaster : Boolean) extends Area{ + val masterPipelined = PipelinedMemoryBus(master.config) + if(!pipelineMaster) { + masterPipelined.cmd << master.cmd + masterPipelined.rsp >> master.rsp + } else { + masterPipelined.cmd <-< master.cmd + masterPipelined.rsp >> master.rsp + } + + val slaveBuses = specification.map(_._1) + val memorySpaces = specification.map(_._2) + + val hits = for((slaveBus, memorySpace) <- specification) yield { + val hit = memorySpace.hit(masterPipelined.cmd.address) + slaveBus.cmd.valid := masterPipelined.cmd.valid && hit + slaveBus.cmd.payload := masterPipelined.cmd.payload.resized + hit + } + val noHit = !hits.orR + masterPipelined.cmd.ready := (hits,slaveBuses).zipped.map(_ && _.cmd.ready).orR || noHit + + val rspPending = RegInit(False) clearWhen(masterPipelined.rsp.valid) setWhen(masterPipelined.cmd.fire && !masterPipelined.cmd.write) + val rspNoHit = RegNext(False) init(False) setWhen(noHit) + val rspSourceId = RegNextWhen(OHToUInt(hits), masterPipelined.cmd.fire) + masterPipelined.rsp.valid := slaveBuses.map(_.rsp.valid).orR || (rspPending && rspNoHit) + masterPipelined.rsp.payload := slaveBuses.map(_.rsp.payload).read(rspSourceId) + + when(rspPending && !masterPipelined.rsp.valid) { //Only one pending read request is allowed + masterPipelined.cmd.ready := False + slaveBuses.foreach(_.cmd.valid := False) + } +} + +class MuraxApb3Timer extends Component{ + val io = new Bundle { + val apb = slave(Apb3( + addressWidth = 8, + dataWidth = 32 + )) + val interrupt = out Bool() + } + + val prescaler = Prescaler(16) + val timerA,timerB = Timer(16) + + val busCtrl = Apb3SlaveFactory(io.apb) + val prescalerBridge = prescaler.driveFrom(busCtrl,0x00) + + val timerABridge = timerA.driveFrom(busCtrl,0x40)( + ticks = List(True, prescaler.io.overflow), + clears = List(timerA.io.full) + ) + + val timerBBridge = timerB.driveFrom(busCtrl,0x50)( + ticks = List(True, prescaler.io.overflow), + clears = List(timerB.io.full) + ) + + val interruptCtrl = InterruptCtrl(2) + val interruptCtrlBridge = interruptCtrl.driveFrom(busCtrl,0x10) + interruptCtrl.io.inputs(0) := timerA.io.full + interruptCtrl.io.inputs(1) := timerB.io.full + io.interrupt := interruptCtrl.io.pendings.orR +} + + +object MuraxApb3TimerGen extends App{ + SpinalVhdl(new MuraxApb3Timer()) +}
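Each helper in MuraxUtiles.scala is a self-contained SpinalHDL component, so it can also be elaborated on its own when debugging the Murax memory map, in the same way MuraxApb3TimerGen does just above. A minimal sketch for the on-chip RAM bridge (the object name MuraxRamGen is illustrative and not part of the repository; the hex file is left null so no initialisation is performed):

package vexriscv.demo

import spinal.core._
import spinal.lib.bus.simple.PipelinedMemoryBusConfig

object MuraxRamGen extends App {
  // 8 kB of on-chip RAM behind a 32-bit PipelinedMemoryBus, little endian, no init file.
  SpinalVerilog(MuraxPipelinedMemoryBusRam(
    onChipRamSize            = 8 kB,
    onChipRamHexFile         = null,
    pipelinedMemoryBusConfig = PipelinedMemoryBusConfig(addressWidth = 32, dataWidth = 32),
    bigEndian                = false
  ))
}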
\ No newline at end of file diff --git a/VexRiscv/src/main/scala/vexriscv/demo/OpenRoad.scala b/VexRiscv/src/main/scala/vexriscv/demo/OpenRoad.scala new file mode 100644 index 0000000..3938eff --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/OpenRoad.scala @@ -0,0 +1,103 @@ +package vexriscv.demo + +import spinal.core._ +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.{Riscv, VexRiscv, VexRiscvConfig, plugin} +import vexriscv.plugin.{BranchPlugin, CsrAccess, CsrPlugin, CsrPluginConfig, DBusCachedPlugin, DecoderSimplePlugin, FullBarrelShifterPlugin, HazardSimplePlugin, IBusCachedPlugin, IntAluPlugin, MmuPlugin, MmuPortConfig, MulDivIterativePlugin, MulPlugin, RegFilePlugin, STATIC, SrcPlugin, YamlPlugin} + +object OpenRoad extends App{ + + def linuxConfig = VexRiscvConfig( + withMemoryStage = true, + withWriteBackStage = true, + List( + // new SingleInstructionLimiterPlugin(), + new IBusCachedPlugin( + resetVector = 0, + compressedGen = false, + prediction = vexriscv.plugin.NONE, + injectorStage = false, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine = 64, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = true, + twoCycleRam = false, + twoCycleCache = true + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = 4 + ) + ), + new DBusCachedPlugin( + dBusCmdMasterPipe = true, + dBusCmdSlavePipe = true, + dBusRspSlavePipe = true, + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 64, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true, + asyncTagMemory = true, + withLrSc = true, + withAmo = true + // ) + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = 4 + ) + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false, + x0Init = true + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false + ), + new FullBarrelShifterPlugin(earlyInjection = true), + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulDivIterativePlugin( + genMul = true, + genDiv = true, + mulUnrollFactor = 32, + divUnrollFactor = 8 + ), + new CsrPlugin(CsrPluginConfig.openSbi(0,Riscv.misaToInt("imas")).copy(ebreakGen = false, mtvecAccess = CsrAccess.READ_WRITE)), //mtvecAccess read required by freertos + + new BranchPlugin( + earlyBranch = true, + catchAddressMisaligned = true, + fenceiGenAsAJump = false + ), + new MmuPlugin( + ioRange = (x => x(31)) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + SpinalConfig().addStandardMemBlackboxing(blackboxAllWhatsYouCan).generateVerilog(new VexRiscv(linuxConfig).setDefinitionName("VexRiscvMsuI4D4")) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/SynthesisBench.scala b/VexRiscv/src/main/scala/vexriscv/demo/SynthesisBench.scala new file mode 100644 index 0000000..6a044ea --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/SynthesisBench.scala @@ -0,0 +1,500 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib._ +import spinal.lib.eda.bench._ +import spinal.lib.eda.icestorm.IcestormStdTargets +import spinal.lib.eda.xilinx.VivadoFlow +import spinal.lib.io.InOutWrapper +import 
vexriscv.demo.smp.VexRiscvSmpClusterGen +import vexriscv.plugin.CsrAccess.{READ_ONLY, READ_WRITE, WRITE_ONLY} +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} +import vexriscv.plugin.{BranchPlugin, CsrPlugin, CsrPluginConfig, DBusSimplePlugin, DecoderSimplePlugin, FullBarrelShifterPlugin, HazardSimplePlugin, IBusSimplePlugin, IntAluPlugin, LightShifterPlugin, NONE, RegFilePlugin, SrcPlugin, YamlPlugin} + +import scala.collection.mutable.ArrayBuffer +import scala.util.Random + +/** + * Created by PIC32F_USER on 16/07/2017. + */ +object VexRiscvSynthesisBench { + def main(args: Array[String]) { + + def wrap(that : => Component) : Component = that +// def wrap(that : => Component) : Component = { +// val c = that +// c.getAllIo.foreach(io => KeepAttribute(io.asDirectionLess())) +// c +// } +// Wrap with input/output registers +// def wrap(that : => Component) : Component = { +// //new WrapWithReg.Wrapper(that) +// val c = that +// c.rework { +// for (e <- c.getOrdredNodeIo) { +// if (e.isInput) { +// e.asDirectionLess() +// e := RegNext(RegNext(in(cloneOf(e)))) +// +// } else { +// e.asDirectionLess() +// out(cloneOf(e)) := RegNext(RegNext(e)) +// } +// } +// } +// c +// } + + // Wrap to do a decoding bench +// def wrap(that : => VexRiscv) : VexRiscv = { +// val top = that +// top.service(classOf[DecoderSimplePlugin]).bench(top) +// top +// } + + val twoStage = new Rtl { + override def getName(): String = "VexRiscv two stages" + override def getRtlPath(): String = "VexRiscvTwoStages.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = false, + bypass = false, + barrielShifter = false, + withMemoryStage = false + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val twoStageBarell = new Rtl { + override def getName(): String = "VexRiscv two stages with barriel" + override def getRtlPath(): String = "VexRiscvTwoStagesBar.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = false, + bypass = true, + barrielShifter = true, + withMemoryStage = false + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val twoStageMulDiv = new Rtl { + override def getName(): String = "VexRiscv two stages with Mul Div" + override def getRtlPath(): String = "VexRiscvTwoStagesMD.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = true, + bypass = false, + barrielShifter = false, + withMemoryStage = false + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val twoStageAll = new Rtl { + override def getName(): String = "VexRiscv two stages with Mul Div fast" + override def getRtlPath(): String = "VexRiscvTwoStagesMDfast.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = true, + bypass = true, + barrielShifter = true, + withMemoryStage = false + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + + + val threeStage = new Rtl { + override def getName(): String = "VexRiscv three stages" + override def getRtlPath(): String = "VexRiscvThreeStages.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = false, + bypass = false, + barrielShifter = false, + withMemoryStage = true + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val threeStageBarell = new Rtl { + override def getName(): String = "VexRiscv three stages with barriel" + override def getRtlPath(): String = "VexRiscvThreeStagesBar.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = false, + bypass = true, + barrielShifter = true, + withMemoryStage = true + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val threeStageMulDiv = new Rtl { + override def 
getName(): String = "VexRiscv three stages with Mul Div" + override def getRtlPath(): String = "VexRiscvThreeStagesMD.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = true, + bypass = false, + barrielShifter = false, + withMemoryStage = true + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + val threeStageAll = new Rtl { + override def getName(): String = "VexRiscv three stages with Mul Div fast" + override def getRtlPath(): String = "VexRiscvThreeStagesMDfast.v" + SpinalVerilog(wrap(GenTwoThreeStage.cpu( + withMulDiv = true, + bypass = true, + barrielShifter = true, + withMemoryStage = true + )).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val smallestNoCsr = new Rtl { + override def getName(): String = "VexRiscv smallest no CSR" + override def getRtlPath(): String = "VexRiscvSmallestNoCsr.v" + SpinalVerilog(wrap(GenSmallestNoCsr.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val smallest = new Rtl { + override def getName(): String = "VexRiscv smallest" + override def getRtlPath(): String = "VexRiscvSmallest.v" + SpinalVerilog(wrap(GenSmallest.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val smallAndProductive = new Rtl { + override def getName(): String = "VexRiscv small and productive" + override def getRtlPath(): String = "VexRiscvSmallAndProductive.v" + SpinalVerilog(wrap(GenSmallAndProductive.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val smallAndProductiveWithICache = new Rtl { + override def getName(): String = "VexRiscv small and productive with instruction cache" + override def getRtlPath(): String = "VexRiscvSmallAndProductiveICache.v" + SpinalVerilog(wrap(GenSmallAndProductiveICache.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val fullNoMmuNoCache = new Rtl { + override def getName(): String = "VexRiscv full no MMU no cache" + override def getRtlPath(): String = "VexRiscvFullNoMmuNoCache.v" + SpinalVerilog(wrap(GenFullNoMmuNoCache.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + val fullNoMmu = new Rtl { + override def getName(): String = "VexRiscv full no MMU" + override def getRtlPath(): String = "VexRiscvFullNoMmu.v" + SpinalVerilog(wrap(GenFullNoMmu.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val noCacheNoMmuMaxPerf= new Rtl { + override def getName(): String = "VexRiscv no cache no MMU max perf" + override def getRtlPath(): String = "VexRiscvNoCacheNoMmuMaxPerf.v" + SpinalVerilog(wrap(GenNoCacheNoMmuMaxPerf.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val fullNoMmuMaxPerf= new Rtl { + override def getName(): String = "VexRiscv full no MMU max perf" + override def getRtlPath(): String = "VexRiscvFullNoMmuMaxPerf.v" + SpinalVerilog(wrap(GenFullNoMmuMaxPerf.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val full = new Rtl { + override def getName(): String = "VexRiscv full with MMU" + override def getRtlPath(): String = "VexRiscvFull.v" + SpinalVerilog(wrap(GenFull.cpu()).setDefinitionName(getRtlPath().split("\\.").head)) + } + + + val linuxBalanced = new Rtl { + override def getName(): String = "VexRiscv linux balanced" + override def getRtlPath(): String = "VexRiscvLinuxBalanced.v" + SpinalConfig(inlineRom = true).generateVerilog(wrap(new VexRiscv(LinuxGen.configFull(false, true))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val linuxBalancedSmp = new Rtl { + override def getName(): String = "VexRiscv linux balanced SMP" + override def getRtlPath(): String = 
"VexRiscvLinuxBalancedSmp.v" + SpinalConfig(inlineRom = true).generateVerilog(wrap(new VexRiscv(LinuxGen.configFull(false, true, withSmp = true))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val linuxFpuSmp = new Rtl { + override def getName(): String = "VexRiscv linux Fpu SMP" + override def getRtlPath(): String = "VexRiscvLinuxFpuSmp.v" + SpinalConfig(inlineRom = true).generateVerilog(wrap(new VexRiscv( + VexRiscvSmpClusterGen.vexRiscvConfig( + hartId = 0, + ioRange = _ (31 downto 28) === 0xF, + resetVector = 0x80000000l, + iBusWidth = 64, + dBusWidth = 64, + loadStoreWidth = 64, + iCacheSize = 4096*2, + dCacheSize = 4096*2, + iCacheWays = 2, + dCacheWays = 2, + withFloat = true, + withDouble = true, + externalFpu = false, + simHalt = true + ))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val linuxFpuSmpNoDecoder = new Rtl { + override def getName(): String = "VexRiscv linux Fpu SMP without decoder" + override def getRtlPath(): String = "VexRiscvLinuxFpuSmpNoDecoder.v" + SpinalConfig(inlineRom = true).generateVerilog(wrap(new VexRiscv( + VexRiscvSmpClusterGen.vexRiscvConfig( + hartId = 0, + ioRange = _ (31 downto 28) === 0xF, + resetVector = 0x80000000l, + iBusWidth = 64, + dBusWidth = 64, + loadStoreWidth = 64, + iCacheSize = 4096*2, + dCacheSize = 4096*2, + iCacheWays = 2, + dCacheWays = 2, + withFloat = true, + withDouble = true, + externalFpu = false, + simHalt = true, + decoderIsolationBench = true + ))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val linuxFpuSmpStupidDecoder = new Rtl { + override def getName(): String = "VexRiscv linux Fpu SMP stupid decoder" + override def getRtlPath(): String = "VexRiscvLinuxFpuSmpStupidDecoder.v" + SpinalConfig(inlineRom = true).generateVerilog(wrap(new VexRiscv( + VexRiscvSmpClusterGen.vexRiscvConfig( + hartId = 0, + ioRange = _ (31 downto 28) === 0xF, + resetVector = 0x80000000l, + iBusWidth = 64, + dBusWidth = 64, + loadStoreWidth = 64, + iCacheSize = 4096*2, + dCacheSize = 4096*2, + iCacheWays = 2, + dCacheWays = 2, + withFloat = true, + withDouble = true, + externalFpu = false, + simHalt = true, + decoderStupid = true + ))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + + + val rtls = List( +// linuxFpuSmp, linuxFpuSmpNoDecoder, linuxFpuSmpStupidDecoder + twoStage, twoStageBarell, twoStageMulDiv, twoStageAll, + threeStage, threeStageBarell, threeStageMulDiv, threeStageAll, + smallestNoCsr, smallest, smallAndProductive, smallAndProductiveWithICache, fullNoMmuNoCache, noCacheNoMmuMaxPerf, fullNoMmuMaxPerf, fullNoMmu, full, linuxBalanced, linuxBalancedSmp + ) +// val rtls = List(linuxBalanced, linuxBalancedSmp) +// val rtls = List(smallest) + val targets = XilinxStdTargets() ++ AlteraStdTargets() ++ IcestormStdTargets().take(1) ++ List( + new Target { + override def getFamilyName(): String = "Kintex UltraScale" + override def synthesise(rtl: Rtl, workspace: String): Report = { + VivadoFlow( + frequencyTarget = 50 MHz, + vivadoPath=sys.env.getOrElse("VIVADO_ARTIX_7_BIN", null), + workspacePath=workspace + "_area", + rtl=rtl, + family=getFamilyName(), + device="xcku035-fbva900-3-e" + ) + } + }, + new Target { + override def getFamilyName(): String = "Kintex UltraScale" + override def synthesise(rtl: Rtl, workspace: String): Report = { + VivadoFlow( + frequencyTarget = 800 MHz, + vivadoPath=sys.env.getOrElse("VIVADO_ARTIX_7_BIN", null), + workspacePath=workspace + "_fmax", + rtl=rtl, + family=getFamilyName(), + device="xcku035-fbva900-3-e" + ) + } + }, + new Target { + override def 
getFamilyName(): String = "Kintex UltraScale+" + override def synthesise(rtl: Rtl, workspace: String): Report = { + VivadoFlow( + frequencyTarget = 50 MHz, + vivadoPath=sys.env.getOrElse("VIVADO_ARTIX_7_BIN", null), + workspacePath=workspace + "_area", + rtl=rtl, + family=getFamilyName(), + device="xcku3p-ffvd900-3-e" + ) + } + }, + new Target { + override def getFamilyName(): String = "Kintex UltraScale+" + override def synthesise(rtl: Rtl, workspace: String): Report = { + VivadoFlow( + frequencyTarget = 800 MHz, + vivadoPath=sys.env.getOrElse("VIVADO_ARTIX_7_BIN", null), + workspacePath=workspace + "_fmax", + rtl=rtl, + family=getFamilyName(), + device="xcku3p-ffvd900-3-e" + ) + } + } + ) + // val targets = IcestormStdTargets() + Bench(rtls, targets) + } +} + + +object BrieySynthesisBench { + def main(args: Array[String]) { + val briey = new Rtl { + override def getName(): String = "Briey" + override def getRtlPath(): String = "Briey.v" + SpinalVerilog({ + val briey = InOutWrapper(new Briey(BrieyConfig.default).setDefinitionName(getRtlPath().split("\\.").head)) + briey.io.axiClk.setName("clk") + briey + }) + } + + + val rtls = List(briey) + + val targets = XilinxStdTargets() ++ AlteraStdTargets() ++ IcestormStdTargets().take(1) + + Bench(rtls, targets) + } +} + + + + +object MuraxSynthesisBench { + def main(args: Array[String]) { + val murax = new Rtl { + override def getName(): String = "Murax" + override def getRtlPath(): String = "Murax.v" + SpinalVerilog({ + val murax = InOutWrapper(new Murax(MuraxConfig.default.copy(gpioWidth = 8)).setDefinitionName(getRtlPath().split("\\.").head)) + murax.io.mainClk.setName("clk") + murax + }) + } + + + val muraxFast = new Rtl { + override def getName(): String = "MuraxFast" + override def getRtlPath(): String = "MuraxFast.v" + SpinalVerilog({ + val murax = InOutWrapper(new Murax(MuraxConfig.fast.copy(gpioWidth = 8)).setDefinitionName(getRtlPath().split("\\.").head)) + murax.io.mainClk.setName("clk") + murax + }) + } + + val rtls = List(murax, muraxFast) + + val targets = XilinxStdTargets() ++ AlteraStdTargets() ++ IcestormStdTargets().take(1) + + Bench(rtls, targets) + } +} + +object AllSynthesisBench { + def main(args: Array[String]): Unit = { + VexRiscvSynthesisBench.main(args) + BrieySynthesisBench.main(args) + MuraxSynthesisBench.main(args) + + } +} + + + +object VexRiscvCustomSynthesisBench { + def main(args: Array[String]) { + + + def gen(csr : CsrPlugin) = new VexRiscv( + config = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = false + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + csr, + new FullBarrelShifterPlugin(), + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = false + ), + new YamlPlugin("cpu0.yaml") + ) + ) + ) + + + val fixedMtvec = new Rtl { + override def getName(): String = "Fixed MTVEC" + override def getRtlPath(): String = 
"fixedMtvec.v" + SpinalVerilog(gen(new CsrPlugin(CsrPluginConfig.smallest(0x80000000l))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val writeOnlyMtvec = new Rtl { + override def getName(): String = "write only MTVEC" + override def getRtlPath(): String = "woMtvec.v" + SpinalVerilog(gen(new CsrPlugin(CsrPluginConfig.smallest(null).copy(mtvecAccess = WRITE_ONLY))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val readWriteMtvec = new Rtl { + override def getName(): String = "read write MTVEC" + override def getRtlPath(): String = "wrMtvec.v" + SpinalVerilog(gen(new CsrPlugin(CsrPluginConfig.smallest(null).copy(mtvecAccess = READ_WRITE))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + val fixedMtvecRoCounter = new Rtl { + override def getName(): String = "Fixed MTVEC, read only mcycle/minstret" + override def getRtlPath(): String = "fixedMtvecRoCounter.v" + SpinalVerilog(gen(new CsrPlugin(CsrPluginConfig.smallest(0x80000000l).copy(mcycleAccess = READ_ONLY, minstretAccess = READ_ONLY))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + + val rwMtvecRoCounter = new Rtl { + override def getName(): String = "read write MTVEC, read only mcycle/minstret" + override def getRtlPath(): String = "readWriteMtvecRoCounter.v" + SpinalVerilog(gen(new CsrPlugin(CsrPluginConfig.smallest(null).copy(mtvecAccess = READ_WRITE, mcycleAccess = READ_ONLY, minstretAccess = READ_ONLY))).setDefinitionName(getRtlPath().split("\\.").head)) + } + + + // val rtls = List(twoStage, twoStageBarell, twoStageMulDiv, twoStageAll, smallestNoCsr, smallest, smallAndProductive, smallAndProductiveWithICache, fullNoMmuNoCache, noCacheNoMmuMaxPerf, fullNoMmuMaxPerf, fullNoMmu, full, linuxBalanced, linuxBalancedSmp) + val rtls = List(fixedMtvec, writeOnlyMtvec, readWriteMtvec,fixedMtvecRoCounter, rwMtvecRoCounter) + // val rtls = List(smallest) + val targets = XilinxStdTargets() ++ AlteraStdTargets() ++ IcestormStdTargets().take(1) + + // val targets = IcestormStdTargets() + Bench(rtls, targets) + } +}
\ No newline at end of file diff --git a/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAhbLite3.scala b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAhbLite3.scala new file mode 100644 index 0000000..f817fb3 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAhbLite3.scala @@ -0,0 +1,180 @@ +package vexriscv.demo + + +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.avalon.AvalonMM +import spinal.lib.com.jtag.{Jtag, JtagTapInstructionCtrl} +import spinal.lib.eda.altera.{InterruptReceiverTag, QSysify, ResetEmitterTag} +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 14.07.17. + */ +//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{ +// +//} + +//make clean run DBUS=SIMPLE_AHBLITE3 IBUS=SIMPLE_AHBLITE3 MMU=no CSR=no DEBUG_PLUGIN=STD + +object VexRiscvAhbLite3{ + def main(args: Array[String]) { + val report = SpinalConfig(mode = if(args.contains("--vhdl")) VHDL else Verilog).generate{ + + //CPU configuration + val cpuConfig = VexRiscvConfig( + plugins = List( + new IBusSimplePlugin( + resetVector = 0x80000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = true, + prediction = STATIC, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ), +// new IBusCachedPlugin( +// config = InstructionCacheConfig( +// cacheSize = 4096, +// bytePerLine =32, +// wayCount = 1, +// addressWidth = 32, +// cpuDataWidth = 32, +// memDataWidth = 32, +// catchIllegalAccess = true, +// catchAccessFault = true, +// catchMemoryTranslationMiss = true, +// asyncTagMemory = false, +// twoCycleRam = true +// ) +// // askMemoryTranslation = true, +// // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( +// // portTlbSize = 4 +// // ) +// ), +// new DBusCachedPlugin( +// config = new DataCacheConfig( +// cacheSize = 4096, +// bytePerLine = 32, +// wayCount = 1, +// addressWidth = 32, +// cpuDataWidth = 32, +// memDataWidth = 32, +// catchAccessError = true, +// catchIllegal = true, +// catchUnaligned = true, +// catchMemoryTranslationMiss = true +// ), +// memoryTranslatorPortConfig = null +// // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( +// // portTlbSize = 6 +// // ) +// ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig( + catchIllegalAccess = false, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = null, + misaExtensionsInit = 66, + misaAccess = CsrAccess.NONE, + mtvecAccess = CsrAccess.NONE, + mtvecInit = 0x00000020l, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = false, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = 
CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = false, + wfiGenAsWait = false, + ucycleAccess = CsrAccess.NONE, + uinstretAccess = CsrAccess.NONE + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + //CPU instanciation + val cpu = new VexRiscv(cpuConfig) + + //CPU modifications to be an AhbLite3 one + cpu.rework { + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + master(plugin.iBus.toAhbLite3Master()).setName("iBusAhbLite3") + } + case plugin: DBusSimplePlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAhbLite3Master(avoidWriteToReadHazard = true)).setName("dBusAhbLite3") + } +// case plugin: IBusCachedPlugin => { +// plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus +// iBus = master(plugin.iBus.toAvalon()) +// .setName("iBusAvalon") +// .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) +// } +// case plugin: DBusCachedPlugin => { +// plugin.dBus.setAsDirectionLess() +// master(plugin.dBus.toAvalon()) +// .setName("dBusAvalon") +// .addTag(ClockDomainTag(ClockDomain.current)) +// } + case plugin: DebugPlugin if args.contains("--jtag")=> plugin.debugClockDomain { + plugin.io.bus.setAsDirectionLess() + val jtag = slave(new Jtag()).setName("jtag") + jtag <> plugin.io.bus.fromJtag() + +// // On Artix FPGA jtag : +// val jtagCtrl = JtagTapInstructionCtrl() +// val tap = jtagCtrl.fromXilinxBscane2(userId = 1) +// jtagCtrl <> plugin.io.bus.fromJtagInstructionCtrl(ClockDomain(tap.TCK)) + } + case _ => + } + } + cpu + } + } +} + diff --git a/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonForSim.scala b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonForSim.scala new file mode 100644 index 0000000..b2c3f69 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonForSim.scala @@ -0,0 +1,196 @@ +package vexriscv.demo + +import vexriscv.plugin._ +import vexriscv.{VexRiscv, plugin, VexRiscvConfig} +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.amba3.apb.Apb3 +import spinal.lib.bus.amba4.axi.{Axi4Shared, Axi4ReadOnly} +import spinal.lib.bus.avalon.AvalonMM +import spinal.lib.eda.altera.{ResetEmitterTag, InterruptReceiverTag, QSysify} + +/** + * Created by spinalvm on 14.07.17. 
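 * Builds a cached VexRiscv whose iBus/dBus are re-exposed as Avalon masters and whose
 * DebugPlugin is exposed as an Avalon debug slave; QSysify at the end of main then emits
 * the QSys TCL integration script for the generated toplevel.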
+ */ +//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{ +// +//} + +//make clean run DBUS=CACHED_AVALON IBUS=CACHED_AVALON MMU=no CSR=no DEBUG_PLUGIN=AVALON + +object VexRiscvAvalonForSim{ + def main(args: Array[String]) { + val report = SpinalVerilog{ + + //CPU configuration + val cpuConfig = VexRiscvConfig( + plugins = List( + /* new IBusSimplePlugin( + resetVector = 0x00000000l, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = STATIC, + catchAccessFault = false, + compressedGen = false + ), + new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false + ),*/ + new IBusCachedPlugin( + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true + ) + // askMemoryTranslation = true, + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 4 + // ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + memoryTranslatorPortConfig = null + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 6 + // ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig( + catchIllegalAccess = false, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = null, + misaExtensionsInit = 66, + misaAccess = CsrAccess.NONE, + mtvecAccess = CsrAccess.NONE, + mtvecInit = 0x00000020l, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = false, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = false, + wfiGenAsWait = false, + ucycleAccess = CsrAccess.NONE, + uinstretAccess = CsrAccess.NONE + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + //CPU instanciation + val cpu = new VexRiscv(cpuConfig) + + //CPU modifications to be an Avalon one + //cpu.setDefinitionName("VexRiscvAvalon") + cpu.rework { + var iBus : AvalonMM = null + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: IBusCachedPlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + 
.addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: DBusSimplePlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DBusCachedPlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DebugPlugin => plugin.debugClockDomain { + plugin.io.bus.setAsDirectionLess() + slave(plugin.io.bus.fromAvalon()) + .setName("debugBusAvalon") + .addTag(ClockDomainTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + plugin.io.resetOut + .addTag(ResetEmitterTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + } + case _ => + } + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: CsrPlugin => { + plugin.externalInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + plugin.timerInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + } + case _ => + } + } + cpu + } + + //Generate the QSys TCL script to integrate the CPU + QSysify(report.toplevel) + } +} + diff --git a/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonWithIntegratedJtag.scala b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonWithIntegratedJtag.scala new file mode 100644 index 0000000..063d945 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAvalonWithIntegratedJtag.scala @@ -0,0 +1,191 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.avalon.AvalonMM +import spinal.lib.com.jtag.Jtag +import spinal.lib.eda.altera.{InterruptReceiverTag, QSysify, ResetEmitterTag} +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 14.07.17. 
+ */ +//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{ +// +//} + + +object VexRiscvAvalonWithIntegratedJtag{ + def main(args: Array[String]) { + val report = SpinalVerilog{ + + //CPU configuration + val cpuConfig = VexRiscvConfig( + plugins = List( + new PcManagerSimplePlugin(0x00000000l, false), +// new IBusSimplePlugin( +// interfaceKeepData = false, +// catchAccessFault = false +// ), +// new DBusSimplePlugin( +// catchAddressMisaligned = false, +// catchAccessFault = false +// ), + new IBusCachedPlugin( + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ) + // askMemoryTranslation = true, + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 4 + // ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + memoryTranslatorPortConfig = null + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 6 + // ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig( + catchIllegalAccess = false, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = null, + misaExtensionsInit = 66, + misaAccess = CsrAccess.NONE, + mtvecAccess = CsrAccess.NONE, + mtvecInit = 0x00000020l, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = false, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = false, + wfiGenAsWait = false, + ucycleAccess = CsrAccess.NONE, + uinstretAccess = CsrAccess.NONE + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + //CPU instanciation + val cpu = new VexRiscv(cpuConfig) + + //CPU modifications to be an Avalon one + cpu.setDefinitionName("VexRiscvAvalon") + cpu.rework { + var iBus : AvalonMM = null + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: IBusCachedPlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAvalon()) + .setName("iBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } 
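        // The remaining cases apply the same recipe to the dBus, while the DebugPlugin case wires a native
        // JTAG tap (slave Jtag bundle) to the debug bus; the ClockDomainTag/ResetEmitterTag/InterruptReceiverTag
        // annotations added in this rework are what QSysify consumes when generating the QSys TCL at the end of main.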
+ case plugin: DBusSimplePlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DBusCachedPlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAvalon()) + .setName("dBusAvalon") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DebugPlugin => plugin.debugClockDomain { + plugin.io.bus.setAsDirectionLess() + val jtag = slave(new Jtag()) + .setName("jtag") + jtag <> plugin.io.bus.fromJtag() + plugin.io.resetOut + .addTag(ResetEmitterTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + } + case _ => + } + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: CsrPlugin => { + plugin.externalInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + plugin.timerInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + } + case _ => + } + } + cpu + } + + //Generate the QSys TCL script to integrate the CPU + QSysify(report.toplevel) + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAxi4WithIntegratedJtag.scala b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAxi4WithIntegratedJtag.scala new file mode 100644 index 0000000..67556e9 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvAxi4WithIntegratedJtag.scala @@ -0,0 +1,189 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.amba4.axi.Axi4ReadOnly +import spinal.lib.bus.avalon.AvalonMM +import spinal.lib.com.jtag.Jtag +import spinal.lib.eda.altera.{InterruptReceiverTag, QSysify, ResetEmitterTag} +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 14.07.17. 
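 * AXI4 flavour of the integrated-JTAG generator: the cached iBus is exposed as an Axi4ReadOnly
 * master and the dBus as an Axi4 master (toAxi4Shared().toAxi4().toFullConfig()), keeping the same
 * JTAG debug port; unlike the Avalon variants, no QSys TCL script is emitted.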
+ */ +//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{ +// +//} + + +object VexRiscvAxi4WithIntegratedJtag{ + def main(args: Array[String]) { + val report = SpinalVerilog{ + + //CPU configuration + val cpuConfig = VexRiscvConfig( + plugins = List( + new PcManagerSimplePlugin(0x00000000l, false), +// new IBusSimplePlugin( +// interfaceKeepData = false, +// catchAccessFault = false +// ), +// new DBusSimplePlugin( +// catchAddressMisaligned = false, +// catchAccessFault = false +// ), + new IBusCachedPlugin( + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true, + twoCycleCache = true + ) + // askMemoryTranslation = true, + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 4 + // ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + memoryTranslatorPortConfig = null + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 6 + // ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig( + catchIllegalAccess = false, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = null, + misaExtensionsInit = 66, + misaAccess = CsrAccess.NONE, + mtvecAccess = CsrAccess.NONE, + mtvecInit = 0x00000020l, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = false, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = false, + wfiGenAsWait = false, + ucycleAccess = CsrAccess.NONE, + uinstretAccess = CsrAccess.NONE + ) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + //CPU instanciation + val cpu = new VexRiscv(cpuConfig) + + //CPU modifications to be an Avalon one + cpu.setDefinitionName("VexRiscvAxi4") + cpu.rework { + var iBus : Axi4ReadOnly = null + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAxi4ReadOnly().toFullConfig()) + .setName("iBusAxi") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock domain to the iBus (used by QSysify) + } + case plugin: IBusCachedPlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + iBus = master(plugin.iBus.toAxi4ReadOnly().toFullConfig()) + .setName("iBusAxi") + .addTag(ClockDomainTag(ClockDomain.current)) //Specify a clock 
domain to the iBus (used by QSysify) + } + case plugin: DBusSimplePlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAxi4Shared().toAxi4().toFullConfig()) + .setName("dBusAxi") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DBusCachedPlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toAxi4Shared().toAxi4().toFullConfig()) + .setName("dBusAxi") + .addTag(ClockDomainTag(ClockDomain.current)) + } + case plugin: DebugPlugin => plugin.debugClockDomain { + plugin.io.bus.setAsDirectionLess() + val jtag = slave(new Jtag()) + .setName("jtag") + jtag <> plugin.io.bus.fromJtag() + plugin.io.resetOut + .addTag(ResetEmitterTag(plugin.debugClockDomain)) + .parent = null //Avoid the io bundle to be interpreted as a QSys conduit + } + case _ => + } + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: CsrPlugin => { + plugin.externalInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + plugin.timerInterrupt + .addTag(InterruptReceiverTag(iBus, ClockDomain.current)) + } + case _ => + } + } + cpu + } + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvCachedWishboneForSim.scala b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvCachedWishboneForSim.scala new file mode 100644 index 0000000..88cad3d --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/VexRiscvCachedWishboneForSim.scala @@ -0,0 +1,144 @@ +package vexriscv.demo + +import spinal.core._ +import spinal.lib._ +import spinal.lib.bus.avalon.AvalonMM +import spinal.lib.eda.altera.{InterruptReceiverTag, QSysify, ResetEmitterTag} +import vexriscv.ip.{DataCacheConfig, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{VexRiscv, VexRiscvConfig, plugin} + +/** + * Created by spinalvm on 14.07.17. + */ +//class VexRiscvAvalon(debugClockDomain : ClockDomain) extends Component{ +// +//} + + +// make clean run DBUS=CACHED_WISHBONE IBUS=CACHED_WISHBONE MMU=no CSR=no DEBUG_PLUGIN=no +object VexRiscvCachedWishboneForSim{ + def main(args: Array[String]) { + val report = SpinalVerilog{ + + //CPU configuration + val cpuConfig = VexRiscvConfig( + plugins = List( +// new IBusSimplePlugin( +// resetVector = 0x80000000l, +// prediction = STATIC +// ), +// new DBusSimplePlugin( +// catchAddressMisaligned = false, +// catchAccessFault = false +// ), + new IBusCachedPlugin( + resetVector = 0x80000000l, + prediction = STATIC, + config = InstructionCacheConfig( + cacheSize = 4096, + bytePerLine =32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = true + ) + // askMemoryTranslation = true, + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 4 + // ) + ), + new DBusCachedPlugin( + config = new DataCacheConfig( + cacheSize = 4096, + bytePerLine = 32, + wayCount = 1, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = 32, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true + ), + dBusCmdMasterPipe = true, //required for wishbone + memoryTranslatorPortConfig = null + // memoryTranslatorPortConfig = MemoryTranslatorPortConfig( + // portTlbSize = 6 + // ) + ), + new StaticMemoryTranslatorPlugin( + ioRange = _(31 downto 28) === 0xF + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true + ), + new RegFilePlugin( + regFileReadyKind = plugin.SYNC, + zeroBoot = false + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false, + executeInsertion = true + ), + new 
FullBarrelShifterPlugin, + new MulPlugin, + new DivPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), +// new DebugPlugin(ClockDomain.current.clone(reset = Bool().setName("debugReset"))), + new BranchPlugin( + earlyBranch = false, + catchAddressMisaligned = true + ), + new CsrPlugin( + config = CsrPluginConfig.small(mtvecInit = 0x80000020l) + ), + new YamlPlugin("cpu0.yaml") + ) + ) + + //CPU instanciation + val cpu = new VexRiscv(cpuConfig) + + //CPU modifications to be an Avalon one + //cpu.setDefinitionName("VexRiscvAvalon") + cpu.rework { + for (plugin <- cpuConfig.plugins) plugin match { + case plugin: IBusSimplePlugin => { + plugin.iBus.setAsDirectionLess() //Unset IO properties of iBus + master(plugin.iBus.toWishbone()).setName("iBusWishbone") + } + case plugin: IBusCachedPlugin => { + plugin.iBus.setAsDirectionLess() + master(plugin.iBus.toWishbone()).setName("iBusWishbone") + } + case plugin: DBusSimplePlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toWishbone()).setName("dBusWishbone") + } + case plugin: DBusCachedPlugin => { + plugin.dBus.setAsDirectionLess() + master(plugin.dBus.toWishbone()).setName("dBusWishbone") + } + case _ => + } + } + cpu + } + + //Generate the QSys TCL script to integrate the CPU + QSysify(report.toplevel) + } +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/smp/Misc.scala b/VexRiscv/src/main/scala/vexriscv/demo/smp/Misc.scala new file mode 100644 index 0000000..58bad63 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/smp/Misc.scala @@ -0,0 +1,289 @@ +package vexriscv.demo.smp + + +import spinal.core._ +import spinal.core.fiber._ +import spinal.lib.bus.bmb._ +import spinal.lib.bus.wishbone.{Wishbone, WishboneConfig, WishboneSlaveFactory} +import spinal.lib.com.jtag.Jtag +import spinal.lib._ +import spinal.lib.bus.bmb.sim.{BmbMemoryMultiPort, BmbMemoryTester} +import spinal.lib.bus.misc.{AddressMapping, DefaultMapping, SizeMapping} +import spinal.lib.eda.bench.Bench +import spinal.lib.generator._ +import spinal.lib.misc.Clint +import spinal.lib.sim.{SimData, SparseMemory, StreamDriver, StreamMonitor, StreamReadyRandomizer} +import vexriscv.{VexRiscv, VexRiscvConfig} +import vexriscv.plugin.{CsrPlugin, DBusCachedPlugin, DebugPlugin, IBusCachedPlugin} + +import scala.collection.mutable +import scala.util.Random + +case class LiteDramNativeParameter(addressWidth : Int, dataWidth : Int) + +case class LiteDramNativeCmd(p : LiteDramNativeParameter) extends Bundle{ + val we = Bool() + val addr = UInt(p.addressWidth bits) +} + +case class LiteDramNativeWData(p : LiteDramNativeParameter) extends Bundle{ + val data = Bits(p.dataWidth bits) + val we = Bits(p.dataWidth/8 bits) +} + +case class LiteDramNativeRData(p : LiteDramNativeParameter) extends Bundle{ + val data = Bits(p.dataWidth bits) +} + + +case class LiteDramNative(p : LiteDramNativeParameter) extends Bundle with IMasterSlave { + val cmd = Stream(LiteDramNativeCmd(p)) + val wdata = Stream(LiteDramNativeWData(p)) + val rdata = Stream(LiteDramNativeRData(p)) + override def asMaster(): Unit = { + master(cmd, wdata) + slave(rdata) + } + + def fromBmb(bmb : Bmb, wdataFifoSize : Int, rdataFifoSize : Int) = { + val bridge = BmbToLiteDram( + bmbParameter = bmb.p, + liteDramParameter = this.p, + wdataFifoSize = wdataFifoSize, + rdataFifoSize = rdataFifoSize + ) + bridge.io.input << bmb + 
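    // the remaining lines connect the bridge output to this LiteDRAM port and return the bridge so the caller keeps a handle on it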
bridge.io.output <> this + bridge + } + + def simSlave(ram : SparseMemory,cd : ClockDomain, bmb : Bmb = null): Unit ={ + import spinal.core.sim._ + def bus = this + case class Cmd(address : Long, we : Boolean) + case class WData(data : BigInt, we : Long) + val cmdQueue = mutable.Queue[Cmd]() + val wdataQueue = mutable.Queue[WData]() + val rdataQueue = mutable.Queue[BigInt]() + + + case class Ref(address : Long, data : BigInt, we : Long, time : Long) + val ref = mutable.Queue[Ref]() + if(bmb != null) StreamMonitor(bmb.cmd, cd){p => + if(bmb.cmd.opcode.toInt == 1) ref.enqueue(Ref(p.fragment.address.toLong, p.fragment.data.toBigInt, p.fragment.mask.toLong, simTime())) + } + + var writeCmdCounter, writeDataCounter = 0 + StreamReadyRandomizer(bus.cmd, cd).factor = 0.5f + StreamMonitor(bus.cmd, cd) { t => + cmdQueue.enqueue(Cmd(t.addr.toLong * (p.dataWidth/8) , t.we.toBoolean)) + if(t.we.toBoolean) writeCmdCounter += 1 + } + + StreamReadyRandomizer(bus.wdata, cd).factor = 0.5f + StreamMonitor(bus.wdata, cd) { p => + writeDataCounter += 1 + // if(p.data.toBigInt == BigInt("00000002000000020000000200000002",16)){ + // println("ASD") + // } + wdataQueue.enqueue(WData(p.data.toBigInt, p.we.toLong)) + } + + // new SimStreamAssert(cmd,cd) + // new SimStreamAssert(wdata,cd) + // new SimStreamAssert(rdata,cd) + + cd.onSamplings{ + if(writeDataCounter-writeCmdCounter > 2){ + println("miaou") + } + if(cmdQueue.nonEmpty && Random.nextFloat() < 0.5){ + val cmd = cmdQueue.head + if(cmd.we){ + if(wdataQueue.nonEmpty){ + // if(cmd.address == 0xc02ae850l) { + // println(s"! $writeCmdCounter $writeDataCounter") + // } + cmdQueue.dequeue() + val wdata = wdataQueue.dequeue() + val raw = wdata.data.toByteArray + val left = wdata.data.toByteArray.size-1 + if(bmb != null){ + assert(ref.nonEmpty) + assert((ref.head.address & 0xFFFFFFF0l) == cmd.address) + assert(ref.head.data == wdata.data) + assert(ref.head.we == wdata.we) + ref.dequeue() + } + // if(cmd.address == 0xc02ae850l) { + // println(s"$cmd $wdata ${simTime()}") + // } + for(i <- 0 until p.dataWidth/8){ + + + if(((wdata.we >> i) & 1) != 0) { + // if(cmd.address == 0xc02ae850l) { + // println(s"W $i ${ if (left - i >= 0) raw(left - i) else 0}") + // } + ram.write(cmd.address + i, if (left - i >= 0) raw(left - i) else 0) + } + } + } + } else { + cmdQueue.dequeue() + val value = new Array[Byte](p.dataWidth/8+1) + val left = value.size-1 + for(i <- 0 until p.dataWidth/8) { + value(left-i) = ram.read(cmd.address+i) + } + rdataQueue.enqueue(BigInt(value)) + } + } + } + + StreamDriver(bus.rdata, cd){ p => + if(rdataQueue.isEmpty){ + false + } else { + p.data #= rdataQueue.dequeue() + true + } + } + } +} + + + +case class BmbToLiteDram(bmbParameter : BmbParameter, + liteDramParameter : LiteDramNativeParameter, + wdataFifoSize : Int, + rdataFifoSize : Int) extends Component{ + val io = new Bundle { + val input = slave(Bmb(bmbParameter)) + val output = master(LiteDramNative(liteDramParameter)) + } + + val resized = io.input.resize(liteDramParameter.dataWidth) + val unburstified = resized.unburstify() + case class Context() extends Bundle { + val context = Bits(unburstified.p.access.contextWidth bits) + val source = UInt(unburstified.p.access.sourceWidth bits) + val isWrite = Bool() + } + + assert(isPow2(rdataFifoSize)) + val pendingRead = Reg(UInt(log2Up(rdataFifoSize) + 1 bits)) init(0) + + val halt = Bool() + val (cmdFork, dataFork) = StreamFork2(unburstified.cmd.haltWhen(halt)) + val outputCmd = Stream(LiteDramNativeCmd(liteDramParameter)) + 
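    // pendingRead tracks outstanding read commands: once its MSB is set (rdataFifoSize reads in flight),
    // the command stream is halted below, so the rdata FIFO can never overflow.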
outputCmd.arbitrationFrom(cmdFork.haltWhen(pendingRead.msb)) + outputCmd.addr := (cmdFork.address >> log2Up(liteDramParameter.dataWidth/8)).resized + outputCmd.we := cmdFork.isWrite + + io.output.cmd <-< outputCmd + + if(bmbParameter.access.canWrite) { + val wData = Stream(LiteDramNativeWData(liteDramParameter)) + wData.arbitrationFrom(dataFork.throwWhen(dataFork.isRead)) + wData.data := dataFork.data + wData.we := dataFork.mask + io.output.wdata << wData.queueLowLatency(wdataFifoSize, latency = 1) + } else { + dataFork.ready := True + io.output.wdata.valid := False + io.output.wdata.data.assignDontCare() + io.output.wdata.we.assignDontCare() + } + + val cmdContext = Stream(Context()) + cmdContext.valid := unburstified.cmd.fire + cmdContext.context := unburstified.cmd.context + cmdContext.source := unburstified.cmd.source + cmdContext.isWrite := unburstified.cmd.isWrite + halt := !cmdContext.ready + + val rspContext = cmdContext.queue(rdataFifoSize) + val rdataFifo = io.output.rdata.queueLowLatency(rdataFifoSize, latency = 1) + val writeTocken = CounterUpDown( + stateCount = rdataFifoSize*2, + incWhen = io.output.wdata.fire, + decWhen = rspContext.fire && rspContext.isWrite + ) + val canRspWrite = writeTocken =/= 0 + val canRspRead = CombInit(rdataFifo.valid) + + rdataFifo.ready := unburstified.rsp.fire && !rspContext.isWrite + rspContext.ready := unburstified.rsp.fire + unburstified.rsp.valid := rspContext.valid && (rspContext.isWrite ? canRspWrite | canRspRead) + unburstified.rsp.setSuccess() + unburstified.rsp.last := True + unburstified.rsp.source := rspContext.source + unburstified.rsp.context := rspContext.context + unburstified.rsp.data := rdataFifo.data + + + pendingRead := pendingRead + U(outputCmd.fire && !outputCmd.we) - U(rdataFifo.fire) +} + +object BmbToLiteDramTester extends App{ + import spinal.core.sim._ + SimConfig.withWave.compile(BmbToLiteDram( + bmbParameter = BmbParameter( + addressWidth = 20, + dataWidth = 32, + lengthWidth = 6, + sourceWidth = 4, + contextWidth = 16 + ), + liteDramParameter = LiteDramNativeParameter( + addressWidth = 20, + dataWidth = 128 + ), + wdataFifoSize = 16, + rdataFifoSize = 16 + )).doSimUntilVoid(seed = 42){dut => + val tester = new BmbMemoryTester(dut.io.input, dut.clockDomain, rspCounterTarget = 3000) + dut.io.output.simSlave(tester.memory.memory, dut.clockDomain) + } +} + +case class BmbToLiteDramGenerator(mapping : AddressMapping)(implicit interconnect : BmbInterconnectGenerator) extends Area{ + val liteDramParameter = Handle[LiteDramNativeParameter] + val bmb = Handle(logic.io.input) + val dram = Handle(logic.io.output.toIo) + + val accessSource = Handle[BmbAccessCapabilities] + val accessRequirements = Handle[BmbAccessParameter] + interconnect.addSlave( + accessSource = accessSource, + accessCapabilities = accessSource, + accessRequirements = accessRequirements, + bus = bmb, + mapping = mapping + ) + val logic = Handle(BmbToLiteDram( + bmbParameter = accessRequirements.toBmbParameter(), + liteDramParameter = liteDramParameter, + wdataFifoSize = 32, + rdataFifoSize = 32 + )) +} + +case class BmbToWishboneGenerator(mapping : AddressMapping)(implicit interconnect : BmbInterconnectGenerator) extends Area{ + val bmb = Handle(logic.io.input) + val wishbone = Handle(logic.io.output) + + val accessSource = Handle[BmbAccessCapabilities] + val accessRequirements = Handle[BmbAccessParameter] + interconnect.addSlave( + accessSource = accessSource, + accessCapabilities = accessSource, + accessRequirements = accessRequirements, + bus = bmb, + 
mapping = mapping + ) + val logic = Handle(BmbToWishbone( + p = accessRequirements.toBmbParameter() + )) +} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpCluster.scala b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpCluster.scala new file mode 100644 index 0000000..ec2aa50 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpCluster.scala @@ -0,0 +1,748 @@ +package vexriscv.demo.smp + +import spinal.core +import spinal.core._ +import spinal.core.sim.{onSimEnd, simSuccess} +import spinal.lib._ +import spinal.lib.bus.bmb.sim.BmbMemoryAgent +import spinal.lib.bus.bmb._ +import spinal.lib.bus.misc.{DefaultMapping, SizeMapping} +import spinal.lib.bus.wishbone.{Wishbone, WishboneConfig, WishboneToBmb, WishboneToBmbGenerator} +import spinal.lib.com.jtag.{Jtag, JtagInstructionDebuggerGenerator, JtagTapInstructionCtrl} +import spinal.lib.com.jtag.sim.JtagTcp +import spinal.lib.com.jtag.xilinx.Bscane2BmbMasterGenerator +import spinal.lib.generator._ +import spinal.core.fiber._ +import spinal.idslplugin.PostInitCallback +import spinal.lib.misc.plic.PlicMapping +import spinal.lib.system.debugger.SystemDebuggerConfig +import vexriscv.ip.{DataCacheAck, DataCacheConfig, DataCacheMemBus, InstructionCache, InstructionCacheConfig} +import vexriscv.plugin._ +import vexriscv.{Riscv, VexRiscv, VexRiscvBmbGenerator, VexRiscvConfig, plugin} + +import scala.collection.mutable +import scala.collection.mutable.ArrayBuffer +import spinal.lib.generator._ +import vexriscv.ip.fpu.FpuParameter + +case class VexRiscvSmpClusterParameter(cpuConfigs : Seq[VexRiscvConfig], + jtagHeaderIgnoreWidth : Int, + withExclusiveAndInvalidation : Boolean, + forcePeripheralWidth : Boolean = true, + outOfOrderDecoder : Boolean = true, + fpu : Boolean = false) + +class VexRiscvSmpClusterBase(p : VexRiscvSmpClusterParameter) extends Area with PostInitCallback{ + val cpuCount = p.cpuConfigs.size + + val debugCd = ClockDomainResetGenerator() + debugCd.holdDuration.load(4095) + debugCd.makeExternal() + + val systemCd = ClockDomainResetGenerator() + systemCd.holdDuration.load(63) + systemCd.setInput(debugCd) + + + val ctx = systemCd.outputClockDomain.push() + override def postInitCallback(): VexRiscvSmpClusterBase.this.type = { + ctx.restore() + this + } + + implicit val interconnect = BmbInterconnectGenerator() + + val debugBridge = debugCd.outputClockDomain on JtagInstructionDebuggerGenerator(p.jtagHeaderIgnoreWidth) + debugBridge.jtagClockDomain.load(ClockDomain.external("jtag", withReset = false)) + + val debugPort = Handle(debugBridge.logic.jtagBridge.io.ctrl.toIo) + + val dBusCoherent = BmbBridgeGenerator() + val dBusNonCoherent = BmbBridgeGenerator() + + val smp = p.withExclusiveAndInvalidation generate new Area{ + val exclusiveMonitor = BmbExclusiveMonitorGenerator() + interconnect.addConnection(dBusCoherent.bmb, exclusiveMonitor.input) + + val invalidationMonitor = BmbInvalidateMonitorGenerator() + interconnect.addConnection(exclusiveMonitor.output, invalidationMonitor.input) + interconnect.addConnection(invalidationMonitor.output, dBusNonCoherent.bmb) + if(p.outOfOrderDecoder) interconnect.masters(invalidationMonitor.output).withOutOfOrderDecoder() + } + + val noSmp = !p.withExclusiveAndInvalidation generate new Area{ + interconnect.addConnection(dBusCoherent.bmb, dBusNonCoherent.bmb) + } + + val cores = for(cpuId <- 0 until cpuCount) yield new Area{ + val cpu = VexRiscvBmbGenerator() + cpu.config.load(p.cpuConfigs(cpuId)) + interconnect.addConnection( + cpu.dBus -> 
List(dBusCoherent.bmb) + ) + cpu.enableDebugBmb( + debugCd = debugCd.outputClockDomain, + resetCd = systemCd, + mapping = SizeMapping(cpuId*0x1000, 0x1000) + ) + interconnect.addConnection(debugBridge.bmb, cpu.debugBmb) + } +} + + +class VexRiscvSmpClusterWithPeripherals(p : VexRiscvSmpClusterParameter) extends VexRiscvSmpClusterBase(p) { + val peripheralBridge = BmbToWishboneGenerator(DefaultMapping) + val peripheral = Handle(peripheralBridge.logic.io.output.toIo) + if(p.forcePeripheralWidth) interconnect.slaves(peripheralBridge.bmb).forceAccessSourceDataWidth(32) + + val plic = BmbPlicGenerator()(interconnect = null) + plic.priorityWidth.load(2) + plic.mapping.load(PlicMapping.sifive) + + val plicWishboneBridge = new Generator{ + dependencies += plic.ctrl + + plic.accessRequirements.load(BmbAccessParameter( + addressWidth = 22, + dataWidth = 32 + ).addSources(1, BmbSourceParameter( + contextWidth = 0, + lengthWidth = 2, + alignment = BmbParameter.BurstAlignement.LENGTH + ))) + + val logic = add task new Area{ + val bridge = WishboneToBmb(WishboneConfig(20, 32)) + bridge.io.output >> plic.ctrl + } + } + val plicWishbone = plicWishboneBridge.produceIo(plicWishboneBridge.logic.bridge.io.input) + + val clint = BmbClintGenerator(0)(interconnect = null) + val clintWishboneBridge = new Generator{ + dependencies += clint.ctrl + + clint.accessRequirements.load(BmbAccessParameter( + addressWidth = 16, + dataWidth = 32 + ).addSources(1, BmbSourceParameter( + contextWidth = 0, + lengthWidth = 2, + alignment = BmbParameter.BurstAlignement.LENGTH + ))) + + val logic = add task new Area{ + val bridge = WishboneToBmb(WishboneConfig(14, 32)) + bridge.io.output >> clint.ctrl + } + } + val clintWishbone = clintWishboneBridge.produceIo(clintWishboneBridge.logic.bridge.io.input) + + val interrupts = in Bits(32 bits) + for(i <- 1 to 31) yield plic.addInterrupt(interrupts(i), i) + + for ((core, cpuId) <- cores.zipWithIndex) { + core.cpu.setTimerInterrupt(clint.timerInterrupt(cpuId)) + core.cpu.setSoftwareInterrupt(clint.softwareInterrupt(cpuId)) + plic.priorityWidth.load(2) + plic.mapping.load(PlicMapping.sifive) + plic.addTarget(core.cpu.externalInterrupt) + plic.addTarget(core.cpu.externalSupervisorInterrupt) + List(clint.logic, core.cpu.logic).produce { + for (plugin <- core.cpu.config.plugins) plugin match { + case plugin: CsrPlugin if plugin.utime != null => plugin.utime := clint.logic.io.time + case _ => + } + } + } + + clint.cpuCount.load(cpuCount) +} + + +object VexRiscvSmpClusterGen { + def vexRiscvConfig(hartId : Int, + ioRange : UInt => Bool = (x => x(31 downto 28) === 0xF), + resetVector : Long = 0x80000000l, + iBusWidth : Int = 128, + dBusWidth : Int = 64, + loadStoreWidth : Int = 32, + coherency : Boolean = true, + atomic : Boolean = true, + iCacheSize : Int = 8192, + dCacheSize : Int = 8192, + iCacheWays : Int = 2, + dCacheWays : Int = 2, + iBusRelax : Boolean = false, + injectorStage : Boolean = false, + earlyBranch : Boolean = false, + earlyShifterInjection : Boolean = true, + dBusCmdMasterPipe : Boolean = false, + withMmu : Boolean = true, + withSupervisor : Boolean = true, + withFloat : Boolean = false, + withDouble : Boolean = false, + externalFpu : Boolean = true, + simHalt : Boolean = false, + decoderIsolationBench : Boolean = false, + decoderStupid : Boolean = false, + regfileRead : RegFileReadKind = plugin.ASYNC, + rvc : Boolean = false, + iTlbSize : Int = 4, + dTlbSize : Int = 4, + prediction : BranchPrediction = vexriscv.plugin.NONE, + withDataCache : Boolean = true, + 
withInstructionCache : Boolean = true, + forceMisa : Boolean = false, + forceMscratch : Boolean = false + ) = { + assert(iCacheSize/iCacheWays <= 4096, "Instruction cache ways can't be bigger than 4096 bytes") + assert(dCacheSize/dCacheWays <= 4096, "Data cache ways can't be bigger than 4096 bytes") + assert(!(withDouble && !withFloat)) + + val csrConfig = if(withSupervisor){ + CsrPluginConfig.openSbi(mhartid = hartId, misa = Riscv.misaToInt(s"ima${if(withFloat) "f" else ""}${if(withDouble) "d" else ""}s")).copy(utimeAccess = CsrAccess.READ_ONLY) + } else { + CsrPluginConfig( + catchIllegalAccess = true, + mvendorid = null, + marchid = null, + mimpid = null, + mhartid = hartId, + misaExtensionsInit = Riscv.misaToInt(s"ima${if(withFloat) "f" else ""}${if(withDouble) "d" else ""}s"), + misaAccess = if(forceMisa) CsrAccess.WRITE_ONLY else CsrAccess.NONE, + mtvecAccess = CsrAccess.READ_WRITE, + mtvecInit = null, + mepcAccess = CsrAccess.READ_WRITE, + mscratchGen = forceMscratch, + mcauseAccess = CsrAccess.READ_ONLY, + mbadaddrAccess = CsrAccess.READ_ONLY, + mcycleAccess = CsrAccess.NONE, + minstretAccess = CsrAccess.NONE, + ecallGen = true, + ebreakGen = true, + wfiGenAsWait = false, + wfiGenAsNop = true, + ucycleAccess = CsrAccess.NONE + ) + } + val config = VexRiscvConfig( + plugins = List( + if(withMmu)new MmuPlugin( + ioRange = ioRange + )else new StaticMemoryTranslatorPlugin( + ioRange = ioRange + ), + //Uncomment the whole IBusCachedPlugin and comment IBusSimplePlugin if you want cached iBus config + if(withInstructionCache) new IBusCachedPlugin( + resetVector = resetVector, + compressedGen = rvc, + prediction = prediction, + historyRamSizeLog2 = 9, + relaxPredictorAddress = true, + injectorStage = injectorStage, + relaxedPcCalculation = iBusRelax, + config = InstructionCacheConfig( + cacheSize = iCacheSize, + bytePerLine = 64, + wayCount = iCacheWays, + addressWidth = 32, + cpuDataWidth = 32, + memDataWidth = iBusWidth, + catchIllegalAccess = true, + catchAccessFault = true, + asyncTagMemory = false, + twoCycleRam = false, + twoCycleCache = true, + reducedBankWidth = true + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = iTlbSize, + latency = 1, + earlyRequireMmuLockup = true, + earlyCacheHits = true + ) + ) else new IBusSimplePlugin( + resetVector = resetVector, + cmdForkOnSecondStage = false, + cmdForkPersistence = false, + prediction = NONE, + catchAccessFault = false, + compressedGen = rvc, + busLatencyMin = 2, + vecRspBuffer = true + ), + if(withDataCache) new DBusCachedPlugin( + dBusCmdMasterPipe = dBusCmdMasterPipe || dBusWidth == 32, + dBusCmdSlavePipe = true, + dBusRspSlavePipe = true, + relaxedMemoryTranslationRegister = true, + config = new DataCacheConfig( + cacheSize = dCacheSize, + bytePerLine = 64, + wayCount = dCacheWays, + addressWidth = 32, + cpuDataWidth = loadStoreWidth, + memDataWidth = dBusWidth, + catchAccessError = true, + catchIllegal = true, + catchUnaligned = true, + withLrSc = atomic, + withAmo = atomic, + withExclusive = coherency, + withInvalidate = coherency, + withWriteAggregation = dBusWidth > 32 + ), + memoryTranslatorPortConfig = MmuPortConfig( + portTlbSize = dTlbSize, + latency = 1, + earlyRequireMmuLockup = true, + earlyCacheHits = true + ) + ) else new DBusSimplePlugin( + catchAddressMisaligned = false, + catchAccessFault = false, + earlyInjection = false + ), + new DecoderSimplePlugin( + catchIllegalInstruction = true, + decoderIsolationBench = decoderIsolationBench, + stupidDecoder = decoderStupid + ), + new RegFilePlugin( + 
regFileReadyKind = regfileRead, + zeroBoot = false, + x0Init = true + ), + new IntAluPlugin, + new SrcPlugin( + separatedAddSub = false + ), + new FullBarrelShifterPlugin(earlyInjection = earlyShifterInjection), + // new LightShifterPlugin, + new HazardSimplePlugin( + bypassExecute = true, + bypassMemory = true, + bypassWriteBack = true, + bypassWriteBackBuffer = true, + pessimisticUseSrc = false, + pessimisticWriteRegFile = false, + pessimisticAddressMatch = false + ), + new MulPlugin, + new MulDivIterativePlugin( + genMul = false, + genDiv = true, + mulUnrollFactor = 32, + divUnrollFactor = 1 + ), + new CsrPlugin(csrConfig), + new BranchPlugin( + earlyBranch = earlyBranch, + catchAddressMisaligned = true, + fenceiGenAsAJump = false + ), + new YamlPlugin(s"cpu$hartId.yaml") + ) + ) + + if(withFloat) config.plugins += new FpuPlugin( + externalFpu = externalFpu, + simHalt = simHalt, + p = FpuParameter(withDouble = withDouble) + ) + config + } + + +// def vexRiscvCluster(cpuCount : Int, resetVector : Long = 0x80000000l) = VexRiscvSmpCluster( +// debugClockDomain = ClockDomain.current.copy(reset = Bool().setName("debugResetIn")), +// p = VexRiscvSmpClusterParameter( +// cpuConfigs = List.tabulate(cpuCount) { +// vexRiscvConfig(_, resetVector = resetVector) +// } +// ) +// ) +// def main(args: Array[String]): Unit = { +// SpinalVerilog { +// vexRiscvCluster(4) +// } +// } +} +// +// +// +//object VexRiscvSmpClusterTestInfrastructure{ +// val REPORT_OFFSET = 0xF8000000 +// val REPORT_THREAD_ID = 0x00 +// val REPORT_THREAD_COUNT = 0x04 +// val REPORT_END = 0x08 +// val REPORT_BARRIER_START = 0x0C +// val REPORT_BARRIER_END = 0x10 +// val REPORT_CONSISTENCY_VALUES = 0x14 +// +// val PUTC = 0x00 +// val GETC = 0x04 +// val CLINT_ADDR = 0x10000 +// val CLINT_IPI_ADDR = CLINT_ADDR+0x0000 +// val CLINT_CMP_ADDR = CLINT_ADDR+0x4000 +// val CLINT_TIME_ADDR = CLINT_ADDR+0xBFF8 +// +// def ram(dut : VexRiscvSmpCluster, withStall : Boolean) = { +// import spinal.core.sim._ +// val cpuCount = dut.cpus.size +// val ram = new BmbMemoryAgent(0x100000000l){ +// case class Report(hart : Int, code : Int, data : Int){ +// override def toString: String = { +// f"CPU:$hart%2d ${code}%3x -> $data%3d" +// } +// } +// val reports = ArrayBuffer.fill(cpuCount)(ArrayBuffer[Report]()) +// +// +// val writeTable = mutable.HashMap[Int, Int => Unit]() +// val readTable = mutable.HashMap[Int, () => Int]() +// def onWrite(address : Int)(body : Int => Unit) = writeTable(address) = body +// def onRead(address : Int)(body : => Int) = readTable(address) = () => body +// +// var writeData = 0 +// var readData = 0 +// var reportWatchdog = 0 +// val cpuEnd = Array.fill(cpuCount)(false) +// val barriers = mutable.HashMap[Int, Int]() +// var consistancyCounter = 0 +// var consistancyLast = 0 +// var consistancyA = 0 +// var consistancyB = 0 +// var consistancyAB = 0 +// var consistancyNone = 0 +// +// onSimEnd{ +// for((list, hart) <- reports.zipWithIndex){ +// println(f"\n\n**** CPU $hart%2d ****") +// for((report, reportId) <- list.zipWithIndex){ +// println(f" $reportId%3d : ${report.code}%3x -> ${report.data}%3d") +// } +// } +// +// println(s"consistancy NONE:$consistancyNone A:$consistancyA B:$consistancyB AB:$consistancyAB") +// } +// +// override def setByte(address: Long, value: Byte): Unit = { +// if((address & 0xF0000000l) != 0xF0000000l) return super.setByte(address, value) +// val byteId = address & 3 +// val mask = 0xFF << (byteId*8) +// writeData = (writeData & ~mask) | ((value.toInt << (byteId*8)) & mask) +// if(byteId 
!= 3) return +// val offset = (address & ~0xF0000000l)-3 +// // println(s"W[0x${offset.toHexString}] = $writeData @${simTime()}") +// offset match { +// case _ if offset >= 0x8000000 && offset < 0x9000000 => { +// val report = Report( +// hart = ((offset & 0xFF0000) >> 16).toInt, +// code = (offset & 0x00FFFF).toInt, +// data = writeData +// ) +//// println(report) +// reports(report.hart) += report +// reportWatchdog += 1 +// import report._ +// code match { +// case REPORT_THREAD_ID => assert(data == hart) +// case REPORT_THREAD_COUNT => assert(data == cpuCount) +// case REPORT_END => assert(data == 0); assert(cpuEnd(hart) == false); cpuEnd(hart) = true; if(!cpuEnd.exists(_ == false)) simSuccess() +// case REPORT_BARRIER_START => { +// val counter = barriers.getOrElse(data, 0) +// assert(counter < cpuCount) +// barriers(data) = counter + 1 +// } +// case REPORT_BARRIER_END => { +// val counter = barriers.getOrElse(data, 0) +// assert(counter == cpuCount) +// } +// case REPORT_CONSISTENCY_VALUES => consistancyCounter match { +// case 0 => { +// consistancyCounter = 1 +// consistancyLast = data +// } +// case 1 => { +// consistancyCounter = 0 +// (data, consistancyLast) match { +// case (666, 0) => consistancyA += 1 +// case (0, 666) => consistancyB += 1 +// case (666, 666) => consistancyAB += 1 +// case (0,0) => consistancyNone += 1; simFailure("Consistancy issue :(") +// } +// } +// } +// } +// } +// case _ => writeTable.get(offset.toInt) match { +// case Some(x) => x(writeData) +// case _ => simFailure(f"\n\nWrite at ${address-3}%8x with $writeData%8x") +// } +// } +// } +// +// override def getByte(address: Long): Byte = { +// if((address & 0xF0000000l) != 0xF0000000l) return super.getByte(address) +// val byteId = address & 3 +// val offset = (address & ~0xF0000000l) +// if(byteId == 0) readData = readTable.get(offset.toInt) match { +// case Some(x) => x() +// case _ => simFailure(f"\n\nRead at $address%8x") +// } +// (readData >> (byteId*8)).toByte +// } +// +// val clint = new { +// val cmp = Array.fill(cpuCount)(0l) +// var time = 0l +// periodicaly(100){ +// time += 10 +// var timerInterrupts = 0l +// for(i <- 0 until cpuCount){ +// if(cmp(i) < time) timerInterrupts |= 1l << i +// } +// dut.io.timerInterrupts #= timerInterrupts +// } +// +//// delayed(200*1000000){ +//// dut.io.softwareInterrupts #= 0xE +//// enableSimWave() +//// println("force IPI") +//// } +// } +// +// onWrite(PUTC)(data => print(data.toChar)) +// onRead(GETC)( if(System.in.available() != 0) System.in.read() else -1) +// +// dut.io.softwareInterrupts #= 0 +// dut.io.timerInterrupts #= 0 +// dut.io.externalInterrupts #= 0 +// dut.io.externalSupervisorInterrupts #= 0 +// onRead(CLINT_TIME_ADDR)(clint.time.toInt) +// onRead(CLINT_TIME_ADDR+4)((clint.time >> 32).toInt) +// for(hartId <- 0 until cpuCount){ +// onWrite(CLINT_IPI_ADDR + hartId*4) {data => +// val mask = 1l << hartId +// val value = (dut.io.softwareInterrupts.toLong & ~mask) | (if(data == 1) mask else 0) +// dut.io.softwareInterrupts #= value +// } +//// onRead(CLINT_CMP_ADDR + hartId*8)(clint.cmp(hartId).toInt) +//// onRead(CLINT_CMP_ADDR + hartId*8+4)((clint.cmp(hartId) >> 32).toInt) +// onWrite(CLINT_CMP_ADDR + hartId*8){data => clint.cmp(hartId) = (clint.cmp(hartId) & 0xFFFFFFFF00000000l) | data} +// onWrite(CLINT_CMP_ADDR + hartId*8+4){data => clint.cmp(hartId) = (clint.cmp(hartId) & 0x00000000FFFFFFFFl) | (data.toLong << 32)} +// } +// +// +// +// } +// dut.io.iMems.foreach(ram.addPort(_,0,dut.clockDomain,true, withStall)) +// 
ram.addPort(dut.io.dMem,0,dut.clockDomain,true, withStall) +// ram +// } +// def init(dut : VexRiscvSmpCluster): Unit ={ +// import spinal.core.sim._ +// dut.clockDomain.forkStimulus(10) +// dut.debugClockDomain.forkStimulus(10) +// dut.io.debugBus.cmd.valid #= false +// } +//} +// +//object VexRiscvSmpClusterTest extends App{ +// import spinal.core.sim._ +// +// val simConfig = SimConfig +// simConfig.withWave +// simConfig.allOptimisation +// simConfig.addSimulatorFlag("--threads 1") +// +// val cpuCount = 4 +// val withStall = true +// +// simConfig.compile(VexRiscvSmpClusterGen.vexRiscvCluster(cpuCount)).doSimUntilVoid(seed = 42){dut => +// disableSimWave() +// SimTimeout(100000000l*10*cpuCount) +// dut.clockDomain.forkSimSpeedPrinter(1.0) +// VexRiscvSmpClusterTestInfrastructure.init(dut) +// val ram = VexRiscvSmpClusterTestInfrastructure.ram(dut, withStall) +// ram.memory.loadBin(0x80000000l, "src/test/cpp/raw/smp/build/smp.bin") +// periodicaly(20000*10){ +// assert(ram.reportWatchdog != 0) +// ram.reportWatchdog = 0 +// } +// } +//} +// +//// echo "echo 10000 | dhrystone >> log" > test +//// time sh test & +//// top -b -n 1 +// +//// TODO +//// MultiChannelFifo.toStream arbitration +//// BmbDecoderOutOfOrder arbitration +//// DataCache to bmb invalidation that are more than single line +//object VexRiscvSmpClusterOpenSbi extends App{ +// import spinal.core.sim._ +// +// val simConfig = SimConfig +// simConfig.withWave +// simConfig.allOptimisation +// simConfig.addSimulatorFlag("--threads 1") +// +// val cpuCount = 2 +// val withStall = false +// +// def gen = { +// val dut = VexRiscvSmpClusterGen.vexRiscvCluster(cpuCount, resetVector = 0x80000000l) +// dut.cpus.foreach{cpu => +// cpu.core.children.foreach{ +// case cache : InstructionCache => cache.io.cpu.decode.simPublic() +// case _ => +// } +// } +// dut +// } +// +// simConfig.workspaceName("rawr_4c").compile(gen).doSimUntilVoid(seed = 42){dut => +//// dut.clockDomain.forkSimSpeedPrinter(1.0) +// VexRiscvSmpClusterTestInfrastructure.init(dut) +// val ram = VexRiscvSmpClusterTestInfrastructure.ram(dut, withStall) +//// ram.memory.loadBin(0x80000000l, "../opensbi/build/platform/spinal/vexriscv/sim/smp/firmware/fw_payload.bin") +// +//// ram.memory.loadBin(0x40F00000l, "/media/data/open/litex_smp/litex_vexriscv_smp/images/fw_jump.bin") +//// ram.memory.loadBin(0x40000000l, "/media/data/open/litex_smp/litex_vexriscv_smp/images/Image") +//// ram.memory.loadBin(0x40EF0000l, "/media/data/open/litex_smp/litex_vexriscv_smp/images/dtb") +//// ram.memory.loadBin(0x41000000l, "/media/data/open/litex_smp/litex_vexriscv_smp/images/rootfs.cpio") +// +// ram.memory.loadBin(0x80000000l, "../opensbi/build/platform/spinal/vexriscv/sim/smp/firmware/fw_jump.bin") +// ram.memory.loadBin(0xC0000000l, "../buildroot/output/images/Image") +// ram.memory.loadBin(0xC1000000l, "../buildroot/output/images/dtb") +// ram.memory.loadBin(0xC2000000l, "../buildroot/output/images/rootfs.cpio") +// +// import spinal.core.sim._ +// var iMemReadBytes, dMemReadBytes, dMemWriteBytes, iMemSequencial,iMemRequests, iMemPrefetchHit = 0l +// var reportTimer = 0 +// var reportCycle = 0 +// val iMemFetchDelta = mutable.HashMap[Long, Long]() +// var iMemFetchDeltaSorted : Seq[(Long, Long)] = null +// var dMemWrites, dMemWritesCached = 0l +// val dMemWriteCacheCtx = List(4,8,16,32,64).map(bytes => new { +// var counter = 0l +// var address = 0l +// val mask = ~((1 << log2Up(bytes))-1) +// }) +// +// import java.io._ +// val csv = new PrintWriter(new File("bench.csv" )) +// 
val iMemCtx = Array.tabulate(cpuCount)(i => new { +// var sequencialPrediction = 0l +// val cache = dut.cpus(i).core.children.find(_.isInstanceOf[InstructionCache]).head.asInstanceOf[InstructionCache].io.cpu.decode +// var lastAddress = 0l +// }) +// dut.clockDomain.onSamplings{ +// dut.io.time #= simTime()/10 +// +// +// for(i <- 0 until cpuCount; iMem = dut.io.iMems(i); ctx = iMemCtx(i)){ +//// if(iMem.cmd.valid.toBoolean && iMem.cmd.ready.toBoolean){ +//// val length = iMem.cmd.length.toInt + 1 +//// val address = iMem.cmd.address.toLong +//// iMemReadBytes += length +//// iMemRequests += 1 +//// } +// if(ctx.cache.isValid.toBoolean && !ctx.cache.mmuRefilling.toBoolean && !ctx.cache.mmuException.toBoolean){ +// val address = ctx.cache.physicalAddress.toLong +// val length = ctx.cache.p.bytePerLine.toLong +// val mask = ~(length-1) +// if(ctx.cache.cacheMiss.toBoolean) { +// iMemReadBytes += length +// if ((address & mask) == (ctx.sequencialPrediction & mask)) { +// iMemSequencial += 1 +// } +// } +// if(!ctx.cache.isStuck.toBoolean) { +// ctx.sequencialPrediction = address + length +// } +// } +// +// if(iMem.cmd.valid.toBoolean && iMem.cmd.ready.toBoolean){ +// val address = iMem.cmd.address.toLong +// iMemRequests += 1 +// if(iMemCtx(i).lastAddress + ctx.cache.p.bytePerLine == address){ +// iMemPrefetchHit += 1 +// } +// val delta = address-iMemCtx(i).lastAddress +// iMemFetchDelta(delta) = iMemFetchDelta.getOrElse(delta, 0l) + 1l +// if(iMemRequests % 1000 == 999) iMemFetchDeltaSorted = iMemFetchDelta.toSeq.sortBy(_._1) +// iMemCtx(i).lastAddress = address +// } +// } +// if(dut.io.dMem.cmd.valid.toBoolean && dut.io.dMem.cmd.ready.toBoolean){ +// if(dut.io.dMem.cmd.opcode.toInt == Bmb.Cmd.Opcode.WRITE){ +// dMemWriteBytes += dut.io.dMem.cmd.length.toInt+1 +// val address = dut.io.dMem.cmd.address.toLong +// dMemWrites += 1 +// for(ctx <- dMemWriteCacheCtx){ +// if((address & ctx.mask) == (ctx.address & ctx.mask)){ +// ctx.counter += 1 +// } else { +// ctx.address = address +// } +// } +// }else { +// dMemReadBytes += dut.io.dMem.cmd.length.toInt+1 +// for(ctx <- dMemWriteCacheCtx) ctx.address = -1 +// } +// } +// reportTimer = reportTimer + 1 +// reportCycle = reportCycle + 1 +// if(reportTimer == 400000){ +// reportTimer = 0 +//// println(f"\n** c=${reportCycle} ir=${iMemReadBytes*1e-6}%5.2f dr=${dMemReadBytes*1e-6}%5.2f dw=${dMemWriteBytes*1e-6}%5.2f **\n") +// +// +// csv.write(s"$reportCycle,$iMemReadBytes,$dMemReadBytes,$dMemWriteBytes,$iMemRequests,$iMemSequencial,$dMemWrites,${dMemWriteCacheCtx.map(_.counter).mkString(",")},$iMemPrefetchHit\n") +// csv.flush() +// reportCycle = 0 +// iMemReadBytes = 0 +// dMemReadBytes = 0 +// dMemWriteBytes = 0 +// iMemRequests = 0 +// iMemSequencial = 0 +// dMemWrites = 0 +// iMemPrefetchHit = 0 +// for(ctx <- dMemWriteCacheCtx) ctx.counter = 0 +// } +// } +// +// +//// fork{ +//// disableSimWave() +//// val atMs = 3790 +//// val durationMs = 5 +//// sleep(atMs*1000000) +//// enableSimWave() +//// println("** enableSimWave **") +//// sleep(durationMs*1000000) +//// println("** disableSimWave **") +//// while(true) { +//// disableSimWave() +//// sleep(100000 * 10) +//// enableSimWave() +//// sleep( 100 * 10) +//// } +////// simSuccess() +//// } +// +// fork{ +// while(true) { +// disableSimWave() +// sleep(100000 * 10) +// enableSimWave() +// sleep( 100 * 10) +// } +// } +// } +//} diff --git a/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexCluster.scala b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexCluster.scala new 
file mode 100644 index 0000000..4cd4917 --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexCluster.scala @@ -0,0 +1,322 @@ +package vexriscv.demo.smp + +import spinal.core._ +import spinal.core.fiber._ +import spinal.lib.bus.bmb._ +import spinal.lib.bus.misc.{AddressMapping, DefaultMapping, SizeMapping} +import spinal.lib.bus.wishbone.{WishboneConfig, WishboneToBmbGenerator} +import spinal.lib.generator.GeneratorComponent +import spinal.lib.sim.SparseMemory +import vexriscv.demo.smp.VexRiscvSmpClusterGen.vexRiscvConfig +import vexriscv.ip.fpu.{FpuCore, FpuParameter} +import vexriscv.plugin.{AesPlugin, DBusCachedPlugin, FpuPlugin} + + +case class VexRiscvLitexSmpClusterParameter( cluster : VexRiscvSmpClusterParameter, + liteDram : LiteDramNativeParameter, + liteDramMapping : AddressMapping, + coherentDma : Boolean, + wishboneMemory : Boolean, + cpuPerFpu : Int) + + +class VexRiscvLitexSmpCluster(p : VexRiscvLitexSmpClusterParameter) extends VexRiscvSmpClusterWithPeripherals(p.cluster) { + val iArbiter = BmbBridgeGenerator() + val iBridge = !p.wishboneMemory generate BmbToLiteDramGenerator(p.liteDramMapping) + val dBridge = !p.wishboneMemory generate BmbToLiteDramGenerator(p.liteDramMapping) + + for(core <- cores) interconnect.addConnection(core.cpu.iBus -> List(iArbiter.bmb)) + !p.wishboneMemory generate interconnect.addConnection( + iArbiter.bmb -> List(iBridge.bmb), + dBusNonCoherent.bmb -> List(dBridge.bmb) + ) + interconnect.addConnection( + iArbiter.bmb -> List(peripheralBridge.bmb), + dBusNonCoherent.bmb -> List(peripheralBridge.bmb) + ) + + val fpuGroups = (cores.reverse.grouped(p.cpuPerFpu)).toList.reverse + val fpu = p.cluster.fpu generate { for(group <- fpuGroups) yield new Area{ + val extraStage = group.size > 2 + + val logic = Handle{ + new FpuCore( + portCount = group.size, + p = FpuParameter( + withDouble = true, + asyncRegFile = false, + schedulerM2sPipe = extraStage + ) + ) + } + + val connect = Handle{ + for(i <- 0 until group.size; + vex = group(i).cpu.logic.cpu; + port = logic.io.port(i)) { + val plugin = vex.service(classOf[FpuPlugin]) + plugin.port.cmd.pipelined(m2s = false, s2m = false) >> port.cmd + plugin.port.commit.pipelined(m2s = extraStage, s2m = false) >> port.commit + plugin.port.completion := port.completion.m2sPipe() + plugin.port.rsp << port.rsp + } + } + }} + + if(p.cluster.withExclusiveAndInvalidation) interconnect.masters(dBusNonCoherent.bmb).withOutOfOrderDecoder() + + if(!p.wishboneMemory) { + dBridge.liteDramParameter.load(p.liteDram) + iBridge.liteDramParameter.load(p.liteDram) + } + + // Coherent DMA interface + val dma = p.coherentDma generate new Area { + val bridge = WishboneToBmbGenerator() + val wishbone = Handle(bridge.logic.io.input.toIo) + val dataWidth = p.cluster.cpuConfigs.head.find(classOf[DBusCachedPlugin]).get.config.memDataWidth + bridge.config.load(WishboneConfig( + addressWidth = 32 - log2Up(dataWidth / 8), + dataWidth = dataWidth, + useSTALL = true, + selWidth = dataWidth/8 + )) + interconnect.addConnection(bridge.bmb, dBusCoherent.bmb) + } + + // Interconnect pipelining (FMax) + for(core <- cores) { + interconnect.setPipelining(core.cpu.dBus)(cmdValid = true, cmdReady = true, rspValid = true, invValid = true, ackValid = true, syncValid = true) + interconnect.setPipelining(core.cpu.iBus)(cmdHalfRate = true, rspValid = true) + interconnect.setPipelining(iArbiter.bmb)(cmdHalfRate = true, rspValid = true) + } + interconnect.setPipelining(dBusCoherent.bmb)(cmdValid = true, cmdReady = true) + 
interconnect.setPipelining(dBusNonCoherent.bmb)(cmdValid = true, cmdReady = true, rspValid = true) + interconnect.setPipelining(peripheralBridge.bmb)(cmdHalfRate = !p.wishboneMemory, cmdValid = p.wishboneMemory, cmdReady = p.wishboneMemory, rspValid = true) + if(!p.wishboneMemory) { + interconnect.setPipelining(iBridge.bmb)(cmdHalfRate = true) + interconnect.setPipelining(dBridge.bmb)(cmdReady = true) + } +} + + +object VexRiscvLitexSmpClusterCmdGen extends App { + var cpuCount = 1 + var iBusWidth = 64 + var dBusWidth = 64 + var iCacheSize = 8192 + var dCacheSize = 8192 + var iCacheWays = 2 + var dCacheWays = 2 + var liteDramWidth = 128 + var coherentDma = false + var wishboneMemory = false + var outOfOrderDecoder = true + var aesInstruction = false + var fpu = false + var cpuPerFpu = 4 + var rvc = false + var netlistDirectory = "." + var netlistName = "VexRiscvLitexSmpCluster" + var iTlbSize = 4 + var dTlbSize = 4 + assert(new scopt.OptionParser[Unit]("VexRiscvLitexSmpClusterCmdGen") { + help("help").text("prints this usage text") + opt[Unit]("coherent-dma") action { (v, c) => coherentDma = true } + opt[String]("cpu-count") action { (v, c) => cpuCount = v.toInt } + opt[String]("ibus-width") action { (v, c) => iBusWidth = v.toInt } + opt[String]("dbus-width") action { (v, c) => dBusWidth = v.toInt } + opt[String]("icache-size") action { (v, c) => iCacheSize = v.toInt } + opt[String]("dcache-size") action { (v, c) => dCacheSize = v.toInt } + opt[String]("icache-ways") action { (v, c) => iCacheWays = v.toInt } + opt[String]("dcache-ways") action { (v, c) => dCacheWays = v.toInt } + opt[String]("litedram-width") action { (v, c) => liteDramWidth = v.toInt } + opt[String]("netlist-directory") action { (v, c) => netlistDirectory = v } + opt[String]("netlist-name") action { (v, c) => netlistName = v } + opt[String]("aes-instruction") action { (v, c) => aesInstruction = v.toBoolean } + opt[String]("out-of-order-decoder") action { (v, c) => outOfOrderDecoder = v.toBoolean } + opt[String]("wishbone-memory" ) action { (v, c) => wishboneMemory = v.toBoolean } + opt[String]("fpu" ) action { (v, c) => fpu = v.toBoolean } + opt[String]("cpu-per-fpu") action { (v, c) => cpuPerFpu = v.toInt } + opt[String]("rvc") action { (v, c) => rvc = v.toBoolean } + opt[String]("itlb-size") action { (v, c) => iTlbSize = v.toInt } + opt[String]("dtlb-size") action { (v, c) => dTlbSize = v.toInt } + }.parse(args)) + + val coherency = coherentDma || cpuCount > 1 + def parameter = VexRiscvLitexSmpClusterParameter( + cluster = VexRiscvSmpClusterParameter( + cpuConfigs = List.tabulate(cpuCount) { hartId => { + val c = vexRiscvConfig( + hartId = hartId, + ioRange = address => address.msb, + resetVector = 0, + iBusWidth = iBusWidth, + dBusWidth = dBusWidth, + iCacheSize = iCacheSize, + dCacheSize = dCacheSize, + iCacheWays = iCacheWays, + dCacheWays = dCacheWays, + coherency = coherency, + iBusRelax = true, + earlyBranch = true, + withFloat = fpu, + withDouble = fpu, + externalFpu = fpu, + loadStoreWidth = if(fpu) 64 else 32, + rvc = rvc, + injectorStage = rvc, + iTlbSize = iTlbSize, + dTlbSize = dTlbSize + ) + if(aesInstruction) c.add(new AesPlugin) + c + }}, + withExclusiveAndInvalidation = coherency, + forcePeripheralWidth = !wishboneMemory, + outOfOrderDecoder = outOfOrderDecoder, + fpu = fpu, + jtagHeaderIgnoreWidth = 0 + ), + liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = liteDramWidth), + liteDramMapping = SizeMapping(0x40000000l, 0x40000000l), + coherentDma = coherentDma, + wishboneMemory = 
wishboneMemory, + cpuPerFpu = cpuPerFpu + ) + + def dutGen = { + val toplevel = new Component { + val body = new VexRiscvLitexSmpCluster( + p = parameter + ) + body.setName("") + } + toplevel + } + + val genConfig = SpinalConfig(targetDirectory = netlistDirectory, inlineRom = true).addStandardMemBlackboxing(blackboxByteEnables) + genConfig.generateVerilog(dutGen.setDefinitionName(netlistName)) + +} + + +//object VexRiscvLitexSmpClusterGen extends App { +// for(cpuCount <- List(1,2,4,8)) { +// def parameter = VexRiscvLitexSmpClusterParameter( +// cluster = VexRiscvSmpClusterParameter( +// cpuConfigs = List.tabulate(cpuCount) { hartId => +// vexRiscvConfig( +// hartId = hartId, +// ioRange = address => address.msb, +// resetVector = 0 +// ) +// }, +// withExclusiveAndInvalidation = true +// ), +// liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = 128), +// liteDramMapping = SizeMapping(0x40000000l, 0x40000000l), +// coherentDma = false +// ) +// +// def dutGen = { +// val toplevel = new VexRiscvLitexSmpCluster( +// p = parameter +// ).toComponent() +// toplevel +// } +// +// val genConfig = SpinalConfig().addStandardMemBlackboxing(blackboxByteEnables) +// // genConfig.generateVerilog(Bench.compressIo(dutGen)) +// genConfig.generateVerilog(dutGen.setDefinitionName(s"VexRiscvLitexSmpCluster_${cpuCount}c")) +// } +//} + +////addAttribute("""mark_debug = "true"""") +object VexRiscvLitexSmpClusterOpenSbi extends App{ + import spinal.core.sim._ + + val simConfig = SimConfig + simConfig.withWave + simConfig.allOptimisation + + val cpuCount = 2 + + def parameter = VexRiscvLitexSmpClusterParameter( + cluster = VexRiscvSmpClusterParameter( + cpuConfigs = List.tabulate(cpuCount) { hartId => + vexRiscvConfig( + hartId = hartId, + ioRange = address => address(31 downto 28) === 0xF, + resetVector = 0x80000000l + ) + }, + withExclusiveAndInvalidation = true, + jtagHeaderIgnoreWidth = 0 + ), + liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = 128), + liteDramMapping = SizeMapping(0x80000000l, 0x70000000l), + coherentDma = false, + wishboneMemory = false, + cpuPerFpu = 4 + ) + + def dutGen = { + import GeneratorComponent.toGenerator + val top = new Component { + val body = new VexRiscvLitexSmpCluster( + p = parameter + ) + } + top.rework{ + top.body.clintWishbone.setAsDirectionLess.allowDirectionLessIo + top.body.peripheral.setAsDirectionLess.allowDirectionLessIo.simPublic() + + val hit = (top.body.peripheral.ADR <<2 >= 0xF0010000l && top.body.peripheral.ADR<<2 < 0xF0020000l) + top.body.clintWishbone.CYC := top.body.peripheral.CYC && hit + top.body.clintWishbone.STB := top.body.peripheral.STB + top.body.clintWishbone.WE := top.body.peripheral.WE + top.body.clintWishbone.ADR := top.body.peripheral.ADR.resized + top.body.clintWishbone.DAT_MOSI := top.body.peripheral.DAT_MOSI + top.body.peripheral.DAT_MISO := top.body.clintWishbone.DAT_MISO + top.body.peripheral.ACK := top.body.peripheral.CYC && (!hit || top.body.clintWishbone.ACK) + top.body.peripheral.ERR := False + } + top + } + + simConfig.compile(dutGen).doSimUntilVoid(seed = 42){dut => + dut.body.debugCd.inputClockDomain.get.forkStimulus(10) + + val ram = SparseMemory() + ram.loadBin(0x80000000l, "../opensbi/build/platform/spinal/vexriscv/sim/smp/firmware/fw_jump.bin") + ram.loadBin(0xC0000000l, "../buildroot/output/images/Image") + ram.loadBin(0xC1000000l, "../buildroot/output/images/dtb") + ram.loadBin(0xC2000000l, "../buildroot/output/images/rootfs.cpio") + + + dut.body.iBridge.dram.simSlave(ram, 
dut.body.debugCd.inputClockDomain) + dut.body.dBridge.dram.simSlave(ram, dut.body.debugCd.inputClockDomain/*, dut.body.dMemBridge.unburstified*/) + + dut.body.interrupts #= 0 + + dut.body.debugCd.inputClockDomain.get.onFallingEdges{ + if(dut.body.peripheral.CYC.toBoolean){ + (dut.body.peripheral.ADR.toLong << 2) match { + case 0xF0000000l => print(dut.body.peripheral.DAT_MOSI.toLong.toChar) + case 0xF0000004l => dut.body.peripheral.DAT_MISO #= (if(System.in.available() != 0) System.in.read() else 0xFFFFFFFFl) + case _ => + } + } + } + + fork{ + while(true) { + disableSimWave() + sleep(100000 * 10) + enableSimWave() + sleep( 100 * 10) + } + } + } +}
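The VexRiscvLitexSmpClusterOpenSbi testbench above reworks the netlist so that peripheral Wishbone accesses in the 0xF0010000-0xF0020000 range are forwarded to the CLINT, while the falling-edge callback emulates a console at word addresses 0xF0000000 (TX) and 0xF0000004 (RX). Below is a minimal plain-Scala sketch of that address decode, reduced to ordinary console I/O; the object and method names are hypothetical and are not part of the diff.

// Sketch only (hypothetical names): the simulation-side peripheral decode of
// VexRiscvLitexSmpClusterOpenSbi, reduced to plain Scala.
object PeripheralDecodeSketch extends App {
  // Returns the value to drive back on DAT_MISO, or None when no response data is needed.
  def onPeripheralAccess(byteAddress: Long, dataMosi: Long): Option[Long] = byteAddress match {
    case 0xF0000000L => print(dataMosi.toChar); None   // console TX register
    case 0xF0000004L =>                                 // console RX register
      Some(if (System.in.available() != 0) System.in.read() else 0xFFFFFFFFL)
    case _           => None
  }

  // Example: write 'A' to the TX register, then poll the RX register once.
  onPeripheralAccess(0xF0000000L, 'A'.toLong)
  println()
  println(onPeripheralAccess(0xF0000004L, 0))
}

Returning 0xFFFFFFFF when no byte is pending mirrors the testbench above, which drives DAT_MISO with that value whenever System.in has nothing available.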
\ No newline at end of file diff --git a/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexMpCluster.scala b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexMpCluster.scala new file mode 100644 index 0000000..e662dfe --- /dev/null +++ b/VexRiscv/src/main/scala/vexriscv/demo/smp/VexRiscvSmpLitexMpCluster.scala @@ -0,0 +1,409 @@ +package vexriscv.demo.smp + +import spinal.core._ +import spinal.lib.bus.bmb._ +import spinal.lib.bus.misc.{AddressMapping, DefaultMapping, SizeMapping} +import spinal.lib.bus.wishbone.{WishboneConfig, WishboneToBmbGenerator} +import spinal.lib.sim.SparseMemory +import vexriscv.demo.smp.VexRiscvSmpClusterGen.vexRiscvConfig + +//case class VexRiscvLitexSmpMpClusterParameter( cluster : VexRiscvSmpClusterParameter, +// liteDram : LiteDramNativeParameter, +// liteDramMapping : AddressMapping) +// +//class VexRiscvLitexSmpMpCluster(p : VexRiscvLitexSmpMpClusterParameter) extends VexRiscvSmpClusterWithPeripherals(p.cluster) { +// val iArbiter = BmbBridgeGenerator() +// val iBridge = BmbToLiteDramGenerator(p.liteDramMapping) +// val dBridge = BmbToLiteDramGenerator(p.liteDramMapping) +// +// for(core <- cores) interconnect.addConnection(core.cpu.iBus -> List(iArbiter.bmb)) +// interconnect.addConnection( +// iArbiter.bmb -> List(iBridge.bmb, peripheralBridge.bmb), +// invalidationMonitor.output -> List(dBridge.bmb, peripheralBridge.bmb) +// ) +// interconnect.masters(invalidationMonitor.output).withOutOfOrderDecoder() +// +// dBridge.liteDramParameter.load(p.liteDram) +// iBridge.liteDramParameter.load(p.liteDram) +// +// // Interconnect pipelining (FMax) +// for(core <- cores) { +// interconnect.setPipelining(core.cpu.dBus)(cmdValid = true, cmdReady = true, rspValid = true) +// interconnect.setPipelining(core.cpu.iBus)(cmdHalfRate = true, rspValid = true) +// interconnect.setPipelining(iArbiter.bmb)(cmdHalfRate = true, rspValid = true) +// } +// interconnect.setPipelining(invalidationMonitor.output)(cmdValid = true, cmdReady = true, rspValid = true) +// interconnect.setPipelining(peripheralBridge.bmb)(cmdHalfRate = true, rspValid = true) +//} +// +// +//object VexRiscvLitexSmpMpClusterGen extends App { +// for(cpuCount <- List(1,2,4,8)) { +// def parameter = VexRiscvLitexSmpMpClusterParameter( +// cluster = VexRiscvSmpClusterParameter( +// cpuConfigs = List.tabulate(cpuCount) { hartId => +// vexRiscvConfig( +// hartId = hartId, +// ioRange = address => address.msb, +// resetVector = 0 +// ) +// } +// ), +// liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = 128), +// liteDramMapping = SizeMapping(0x40000000l, 0x40000000l) +// ) +// +// def dutGen = { +// val toplevel = new VexRiscvLitexSmpMpCluster( +// p = parameter +// ).toComponent() +// toplevel +// } +// +// val genConfig = SpinalConfig().addStandardMemBlackboxing(blackboxByteEnables) +// // genConfig.generateVerilog(Bench.compressIo(dutGen)) +// genConfig.generateVerilog(dutGen.setDefinitionName(s"VexRiscvLitexSmpMpCluster_${cpuCount}c")) +// } +//} + + + +// +////addAttribute("""mark_debug = "true"""") +//class VexRiscvLitexSmpMpCluster(val p : VexRiscvLitexSmpMpClusterParameter, +// val debugClockDomain : ClockDomain, +// val jtagClockDomain : ClockDomain) extends Component{ +// +// val peripheralWishboneConfig = WishboneConfig( +// addressWidth = 30, +// dataWidth = 32, +// selWidth = 4, +// useERR = true, +// useBTE = true, +// useCTI = true +// ) +// +// val cpuCount = p.cluster.cpuConfigs.size +// +// val io = new Bundle { +// val dMem = Vec(master(LiteDramNative(p.liteDram)), 
cpuCount) +// val iMem = Vec(master(LiteDramNative(p.liteDram)), cpuCount) +// val peripheral = master(Wishbone(peripheralWishboneConfig)) +// val clint = slave(Wishbone(Clint.getWisboneConfig())) +// val plic = slave(Wishbone(WishboneConfig(addressWidth = 20, dataWidth = 32))) +// val interrupts = in Bits(32 bits) +// val jtagInstruction = slave(JtagTapInstructionCtrl()) +// val debugReset = out Bool() +// } +// val clint = Clint(cpuCount) +// clint.driveFrom(WishboneSlaveFactory(io.clint)) +// +// val cluster = VexRiscvSmpCluster(p.cluster, debugClockDomain) +// cluster.io.debugReset <> io.debugReset +// cluster.io.timerInterrupts <> B(clint.harts.map(_.timerInterrupt)) +// cluster.io.softwareInterrupts <> B(clint.harts.map(_.softwareInterrupt)) +// cluster.io.time := clint.time +// +// val debug = debugClockDomain on new Area{ +// val jtagConfig = SystemDebuggerConfig() +// +// val jtagBridge = new JtagBridgeNoTap(jtagConfig, jtagClockDomain) +// jtagBridge.io.ctrl << io.jtagInstruction +// +// val debugger = new SystemDebugger(jtagConfig) +// debugger.io.remote <> jtagBridge.io.remote +// +// cluster.io.debugBus << debugger.io.mem.toBmb() +// +//// io.jtagInstruction.allowDirectionLessIo.setAsDirectionLess +//// val bridge = Bscane2BmbMaster(1) +//// cluster.io.debugBus << bridge.io.bmb +// +// +//// val bscane2 = BSCANE2(usedId) +//// val jtagClockDomain = ClockDomain(bscane2.TCK) +//// +//// val jtagBridge = new JtagBridgeNoTap(jtagConfig, jtagClockDomain) +//// jtagBridge.io.ctrl << bscane2.toJtagTapInstructionCtrl() +//// +//// val debugger = new SystemDebugger(jtagConfig) +//// debugger.io.remote <> jtagBridge.io.remote +//// +//// io.bmb << debugger.io.mem.toBmb() +// } +// +// val dBusDecoder = BmbDecoderOutOfOrder( +// p = cluster.io.dMem.p, +// mappings = Seq(DefaultMapping, p.liteDramMapping), +// capabilities = Seq(cluster.io.dMem.p, cluster.io.dMem.p), +// pendingRspTransactionMax = 32 +// ) +//// val dBusDecoder = BmbDecoderOut( +//// p = cluster.io.dMem.p, +//// mappings = Seq(DefaultMapping, p.liteDramMapping), +//// capabilities = Seq(cluster.io.dMem.p, cluster.io.dMem.p), +//// pendingMax = 31 +//// ) +// dBusDecoder.io.input << cluster.io.dMem.pipelined(cmdValid = true, cmdReady = true, rspValid = true) +// +// +// val perIBus = for(id <- 0 until cpuCount) yield new Area{ +// val decoder = BmbDecoder( +// p = cluster.io.iMems(id).p, +// mappings = Seq(DefaultMapping, p.liteDramMapping), +// capabilities = Seq(cluster.io.iMems(id).p,cluster.io.iMems(id).p), +// pendingMax = 15 +// ) +// +// decoder.io.input << cluster.io.iMems(id) +// io.iMem(id).fromBmb(decoder.io.outputs(1).pipelined(cmdHalfRate = true), wdataFifoSize = 0, rdataFifoSize = 32) +// val toPeripheral = decoder.io.outputs(0).resize(dataWidth = 32).pipelined(cmdHalfRate = true, rspValid = true) +// } +// +// val dBusDecoderToPeripheral = dBusDecoder.io.outputs(0).resize(dataWidth = 32).pipelined(cmdHalfRate = true, rspValid = true) +// +// val peripheralAccessLength = Math.max(perIBus(0).toPeripheral.p.lengthWidth, dBusDecoder.io.outputs(0).p.lengthWidth) +// val peripheralArbiter = BmbArbiter( +// p = dBusDecoder.io.outputs(0).p.copy( +// sourceWidth = List(perIBus(0).toPeripheral, dBusDecoderToPeripheral).map(_.p.sourceWidth).max + log2Up(cpuCount + 1), +// contextWidth = List(perIBus(0).toPeripheral, dBusDecoderToPeripheral).map(_.p.contextWidth).max, +// lengthWidth = peripheralAccessLength, +// dataWidth = 32 +// ), +// portCount = cpuCount+1, +// lowerFirstPriority = true +// ) +// +// for(id <- 0 
until cpuCount){ +// peripheralArbiter.io.inputs(id) << perIBus(id).toPeripheral +// } +// peripheralArbiter.io.inputs(cpuCount) << dBusDecoderToPeripheral +// +// val peripheralWishbone = peripheralArbiter.io.output.pipelined(cmdValid = true).toWishbone() +// io.peripheral << peripheralWishbone +// +// +// val dBusDemux = BmbSourceDecoder(dBusDecoder.io.outputs(1).p) +// dBusDemux.io.input << dBusDecoder.io.outputs(1).pipelined(cmdValid = true, cmdReady = true,rspValid = true) +// val dMemBridge = for(id <- 0 until cpuCount) yield { +// io.dMem(id).fromBmb(dBusDemux.io.outputs(id), wdataFifoSize = 32, rdataFifoSize = 32) +// } +// +// +// val plic = new Area{ +// val priorityWidth = 2 +// +// val gateways = for(i <- 1 until 32) yield PlicGatewayActiveHigh( +// source = io.interrupts(i), +// id = i, +// priorityWidth = priorityWidth +// ) +// +// val bus = WishboneSlaveFactory(io.plic) +// +// val targets = for(i <- 0 until cpuCount) yield new Area{ +// val machine = PlicTarget( +// gateways = gateways, +// priorityWidth = priorityWidth +// ) +// val supervisor = PlicTarget( +// gateways = gateways, +// priorityWidth = priorityWidth +// ) +// +// cluster.io.externalInterrupts(i) := machine.iep +// cluster.io.externalSupervisorInterrupts(i) := supervisor.iep +// } +// +// val bridge = PlicMapper(bus, PlicMapping.sifive)( +// gateways = gateways, +// targets = targets.flatMap(t => List(t.machine, t.supervisor)) +// ) +// } +//// +//// io.dMem.foreach(_.cmd.valid.addAttribute("""mark_debug = "true"""")) +//// io.dMem.foreach(_.cmd.ready.addAttribute("""mark_debug = "true"""")) +//// io.iMem.foreach(_.cmd.valid.addAttribute("""mark_debug = "true"""")) +//// io.iMem.foreach(_.cmd.ready.addAttribute("""mark_debug = "true"""")) +//// +//// cluster.io.dMem.cmd.valid.addAttribute("""mark_debug = "true"""") +//// cluster.io.dMem.cmd.ready.addAttribute("""mark_debug = "true"""") +//// cluster.io.dMem.rsp.valid.addAttribute("""mark_debug = "true"""") +//// cluster.io.dMem.rsp.ready.addAttribute("""mark_debug = "true"""") +//} +// +//object VexRiscvLitexSmpMpClusterGen extends App { +// for(cpuCount <- List(1,2,4,8)) { +// def parameter = VexRiscvLitexSmpMpClusterParameter( +// cluster = VexRiscvSmpClusterParameter( +// cpuConfigs = List.tabulate(cpuCount) { hartId => +// vexRiscvConfig( +// hartId = hartId, +// ioRange = address => address.msb, +// resetVector = 0 +// ) +// } +// ), +// liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = 128), +// liteDramMapping = SizeMapping(0x40000000l, 0x40000000l) +// ) +// +// def dutGen = { +// val toplevel = new VexRiscvLitexSmpMpCluster( +// p = parameter, +// debugClockDomain = ClockDomain.current.copy(reset = Bool().setName("debugResetIn")), +// jtagClockDomain = ClockDomain.external("jtag", withReset = false) +// ) +// toplevel +// } +// +// val genConfig = SpinalConfig().addStandardMemBlackboxing(blackboxByteEnables) +// // genConfig.generateVerilog(Bench.compressIo(dutGen)) +// genConfig.generateVerilog(dutGen.setDefinitionName(s"VexRiscvLitexSmpMpCluster_${cpuCount}c")) +// } +// +//} +// +// +//object VexRiscvLitexSmpMpClusterOpenSbi extends App{ +// import spinal.core.sim._ +// +// val simConfig = SimConfig +// simConfig.withWave +// simConfig.withFstWave +// simConfig.allOptimisation +// +// val cpuCount = 2 +// +// def parameter = VexRiscvLitexSmpMpClusterParameter( +// cluster = VexRiscvSmpClusterParameter( +// cpuConfigs = List.tabulate(cpuCount) { hartId => +// vexRiscvConfig( +// hartId = hartId, +// ioRange = address => address(31 
downto 28) === 0xF, +// resetVector = 0x80000000l +// ) +// } +// ), +// liteDram = LiteDramNativeParameter(addressWidth = 32, dataWidth = 128), +// liteDramMapping = SizeMapping(0x80000000l, 0x70000000l) +// ) +// +// def dutGen = { +// val top = new VexRiscvLitexSmpMpCluster( +// p = parameter, +// debugClockDomain = ClockDomain.current.copy(reset = Bool().setName("debugResetIn")), +// jtagClockDomain = ClockDomain.external("jtag", withReset = false) +// ){ +// io.jtagInstruction.allowDirectionLessIo.setAsDirectionLess +// val jtag = slave(Jtag()) +// jtagClockDomain.readClockWire.setAsDirectionLess() := jtag.tck +// val jtagLogic = jtagClockDomain on new Area{ +// val tap = new JtagTap(jtag, 4) +// val idcodeArea = tap.idcode(B"x10001FFF")(1) +// val wrapper = tap.map(io.jtagInstruction, instructionId = 2) +// } +// } +// top.rework{ +// top.io.clint.setAsDirectionLess.allowDirectionLessIo +// top.io.peripheral.setAsDirectionLess.allowDirectionLessIo.simPublic() +// +// val hit = (top.io.peripheral.ADR <<2 >= 0xF0010000l && top.io.peripheral.ADR<<2 < 0xF0020000l) +// top.io.clint.CYC := top.io.peripheral.CYC && hit +// top.io.clint.STB := top.io.peripheral.STB +// top.io.clint.WE := top.io.peripheral.WE +// top.io.clint.ADR := top.io.peripheral.ADR.resized +// top.io.clint.DAT_MOSI := top.io.peripheral.DAT_MOSI +// top.io.peripheral.DAT_MISO := top.io.clint.DAT_MISO +// top.io.peripheral.ACK := top.io.peripheral.CYC && (!hit || top.io.clint.ACK) +// top.io.peripheral.ERR := False +// +//// top.dMemBridge.unburstified.cmd.simPublic() +// } +// top +// } +// simConfig.compile(dutGen).doSimUntilVoid(seed = 42){dut => +// dut.clockDomain.forkStimulus(10) +// fork { +// dut.debugClockDomain.resetSim #= false +// sleep (0) +// dut.debugClockDomain.resetSim #= true +// sleep (10) +// dut.debugClockDomain.resetSim #= false +// } +// +// JtagTcp(dut.jtag, 10*20) +// +// val ram = SparseMemory() +// ram.loadBin(0x80000000l, "../opensbi/build/platform/spinal/vexriscv/sim/smp/firmware/fw_jump.bin") +// ram.loadBin(0xC0000000l, "../buildroot/output/images/Image") +// ram.loadBin(0xC1000000l, "../buildroot/output/images/dtb") +// ram.loadBin(0xC2000000l, "../buildroot/output/images/rootfs.cpio") +// +// for(id <- 0 until cpuCount) { +// dut.io.iMem(id).simSlave(ram, dut.clockDomain) +// dut.io.dMem(id).simSlave(ram, dut.clockDomain) +// } +// +// dut.io.interrupts #= 0 +// +// +//// val stdin = mutable.Queue[Byte]() +//// def stdInPush(str : String) = stdin ++= str.toCharArray.map(_.toByte) +//// fork{ +//// sleep(4000*1000000l) +//// stdInPush("root\n") +//// sleep(1000*1000000l) +//// stdInPush("ping localhost -i 0.01 > /dev/null &\n") +//// stdInPush("ping localhost -i 0.01 > /dev/null &\n") +//// stdInPush("ping localhost -i 0.01 > /dev/null &\n") +//// stdInPush("ping localhost -i 0.01 > /dev/null &\n") +//// sleep(500*1000000l) +//// while(true){ +//// sleep(500*1000000l) +//// stdInPush("uptime\n") +//// printf("\n** uptime **") +//// } +//// } +// dut.clockDomain.onFallingEdges { +// if (dut.io.peripheral.CYC.toBoolean) { +// (dut.io.peripheral.ADR.toLong << 2) match { +// case 0xF0000000l => print(dut.io.peripheral.DAT_MOSI.toLong.toChar) +// case 0xF0000004l => dut.io.peripheral.DAT_MISO #= (if (System.in.available() != 0) System.in.read() else 0xFFFFFFFFl) +// case _ => +// // case 0xF0000004l => { +// // val c = if(stdin.nonEmpty) { +// // stdin.dequeue().toInt & 0xFF +// // } else { +// // 0xFFFFFFFFl +// // } +// // dut.io.peripheral.DAT_MISO #= c +// // } +// // case _ => +// // } +// 
// println(f"${dut.io.peripheral.ADR.toLong}%x") +// } +// } +// } +// +// fork{ +// val at = 0 +// val duration = 1000 +// while(simTime() < at*1000000l) { +// disableSimWave() +// sleep(100000 * 10) +// enableSimWave() +// sleep( 200 * 10) +// } +// println("\n\n********************") +// sleep(duration*1000000l) +// println("********************\n\n") +// while(true) { +// disableSimWave() +// sleep(100000 * 10) +// enableSimWave() +// sleep( 400 * 10) +// } +// } +// } +//}
\ No newline at end of file
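Several of the testbenches in these files fork a loop that toggles wave recording on and off, so that only short windows of activity are captured during very long OpenSBI/Linux boot simulations. The helper below sketches that pattern; the object name, method name and parameters are hypothetical, and it assumes it is called from inside a running SpinalSim testbench with the 10 time-unit clock period used above.

// Sketch only (hypothetical helper): the duty-cycled wave capture pattern
// repeated by the forks in the testbenches above. Wave recording stays off
// for most of the time and is re-enabled for short windows, keeping the
// wave files small.
import spinal.core.sim._

object WaveSampler {
  def forkWaveSampler(darkCycles: Long = 100000,
                      captureCycles: Long = 100,
                      clockPeriod: Long = 10): Unit = fork {
    while (true) {
      disableSimWave()                     // run without recording
      sleep(darkCycles * clockPeriod)
      enableSimWave()                      // record a short window of activity
      sleep(captureCycles * clockPeriod)
    }
  }
}

With the defaults above this records roughly 100 cycles of waveform out of every 100100, matching the sleep(100000 * 10) / sleep(100 * 10) loops in the original code.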