VHDL 8-bit adder with carry & testbench

Heyo!
I've been trying to get back into VHDL programming for some upcoming classes, so I wrote a simple 8-bit adder and want to test it with a testbench. (I'm working with Xilinx Vivado, by the way.)
I don't get any syntax errors, but in the simulation the signals show up as "U" (undefined, I guess?).
I hope it's easy to see what I did (and why), but I can't really find the problem.
My code:
library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
entity addierer is
Port ( a : in STD_LOGIC_VECTOR(7 downto 0);
b : in STD_LOGIC_VECTOR(7 downto 0);
cin : in STD_LOGIC;
s : out STD_LOGIC_VECTOR(7 downto 0);
cout : out STD_LOGIC);
end addierer;
architecture Behavioral of addierer is
COMPONENT volladdierer is
Port ( a : in STD_LOGIC;
b : in STD_LOGIC;
cin : in STD_LOGIC;
s : out STD_LOGIC;
cout : out STD_LOGIC
);
end COMPONENT;
signal c : STD_LOGIC_VECTOR(6 downto 0);
begin
PROCESS (a,b,cin)
BEGIN
s(0) <= a(0) xor b(0) xor cin; -- the first s is still computed with cin
c(0) <= (cin and b(0)) or (cin and a(0)) or (a(0) and b(0)); -- as is the first carry out
for i in 1 to 7 loop -- loop over the remaining bit positions of the arrays
s(i) <= a(i) xor b(i) xor c(i-1);
c(i) <= (c(i-1) and b(i)) or (c(i-1) and a(i)) or (a(i) and b(i));
end loop;
cout <= c(6);
END PROCESS;
end Behavioral;
The testbench I used:
library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
entity testbench_addierer is
-- Port ( );
end testbench_addierer;
architecture Behavioral of testbench_addierer is
SIGNAL a : STD_LOGIC_VECTOR(7 downto 0);
SIGNAL b : STD_LOGIC_VECTOR(7 downto 0);
SIGNAL cin : STD_LOGIC;
SIGNAL s : STD_LOGIC_VECTOR(7 downto 0);
SIGNAL cout : STD_LOGIC;
COMPONENT addierer IS
PORT(
a : IN STD_LOGIC_VECTOR(7 downto 0);
b : IN STD_LOGIC_VECTOR(7 downto 0);
cin : IN STD_LOGIC;
s : OUT STD_LOGIC_VECTOR(7 downto 0);
cout: OUT STD_LOGIC
);
END COMPONENT;
begin
U1: addierer
PORT MAP(
a => a,
b => b,
cin => cin,
s => s,
cout => cout
);
process
begin
a <= "00000000" after 0ms;
a <= "00000001" after 10ms;
a <= "01010101" after 20ms;
b <= "00000000" after 0ms;
b <= "10001001" after 10ms;
b <= "00000001" after 20ms;
cin <= '0' after 0ms;
cin <= '0' after 10ms;
cin <= '0' after 20ms;
end process;
end Behavioral;
Thanks in advance, and let me know if something's unclear in my code!
(And please keep in mind I'm a total beginner.)
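Two things stand out to me here; take these as sketches of what I would try, not a definitive fix.
In the adder itself, c is declared as STD_LOGIC_VECTOR(6 downto 0), but the loop assigns c(7); and because c is a signal that is not in the sensitivity list, s(i) never sees the carry computed in the same process run, since a signal only takes its new value once the process suspends. A sketch of the same ripple logic with the running carry kept in a variable instead:
-- Sketch: the running carry is a variable, so each loop iteration sees the
-- carry computed in the previous iteration; the last carry goes to cout.
process (a, b, cin)
    variable carry : STD_LOGIC;
begin
    carry := cin;
    for i in 0 to 7 loop
        s(i) <= a(i) xor b(i) xor carry;
        carry := (carry and b(i)) or (carry and a(i)) or (a(i) and b(i));
    end loop;
    cout <= carry;
end process;
In the testbench, the stimulus process has no wait statement, so it never suspends and simulation time cannot advance; on top of that, each later assignment to the same signal cancels the earlier projected one, and with millisecond delays nothing would change inside Vivado's default 1000 ns run window anyway. A sketch of the same stimulus with explicit waits and nanosecond timing (the 10 ns step is my assumption):
-- Sketch of a stimulus process that suspends between steps.
stim: process
begin
    a <= "00000000"; b <= "00000000"; cin <= '0';
    wait for 10 ns;
    a <= "00000001"; b <= "10001001";
    wait for 10 ns;
    a <= "01010101"; b <= "00000001";
    wait for 10 ns;
    wait; -- stop here so the process does not run again
end process;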

Related

I have a Vivado project, and when I try to create a port map in one of my VHDL programs I get errors that I don't know how to resolve

I have this VHDL file that I am trying to make a port map for in Vivado, but I keep getting errors that I don't understand. I'm relatively new to VHDL and would appreciate any assistance in figuring out what I need to change.
This is the program with the port map:
LIBRARY IEEE;
USE IEEE.STD_LOGIC_1164.ALL;
ENTITY DispAlu4 IS
PORT(
a: IN STD_LOGIC_VECTOR(3 DOWNTO 0); -- Input SW[7..4]: a[3..0]
b: IN STD_LOGIC_VECTOR(3 DOWNTO 0); -- Input SW[3..0]: b[3..0]
control: IN STD_LOGIC_VECTOR(1 DOWNTO 0); -- Input SW[15..14]: control[1..0]
led15: OUT STD_LOGIC; -- Output LED[15]: overflow
led17: OUT STD_LOGIC; -- Output LED[17]: zero
led16: OUT STD_LOGIC; -- Output LED[16]: cOut
an: OUT STD_LOGIC_VECTOR(7 DOWNTO 0); -- Output AN[7..0]: '0' enabled
hex: OUT STD_LOGIC_VECTOR(6 DOWNTO 0) -- Output HEX[6..0]: result[3..0]
);
END DispAlu4;
ARCHITECTURE behavioral OF DispAlu4 IS
COMPONENT Alu4
PORT(
a, b: IN STD_LOGIC_VECTOR(3 DOWNTO 0);
control: IN STD_LOGIC_VECTOR(1 DOWNTO 0);
overflow: OUT STD_LOGIC;
zero: OUT STD_LOGIC;
cOut: OUT STD_LOGIC;
result: OUT STD_LOGIC_VECTOR(3 DOWNTO 0)
);
END COMPONENT;
COMPONENT Bin2Hex
PORT(
bin: IN STD_LOGIC_VECTOR(3 DOWNTO 0);
hex: OUT STD_LOGIC_VECTOR(6 DOWNTO 0)
);
END COMPONENT;
SIGNAL overflow_sig: STD_LOGIC;
SIGNAL zero_sig: STD_LOGIC;
SIGNAL carry_sig: STD_LOGIC;
SIGNAL result_sig: STD_LOGIC_VECTOR(3 DOWNTO 0);
BEGIN
U1: Alu4 PORT MAP (a, b, control_sig => control, overflow_sig, zero_sig, result_sig, carry_sig); #port map that has errors
END behavioral;
These are the other programs referenced in it:
LIBRARY IEEE;
USE IEEE.STD_LOGIC_1164.ALL;
ENTITY Alu4 IS
GENERIC( CONSTANT N: INTEGER := 4; -- 4 bits ALU
CONSTANT Z: STD_LOGIC_VECTOR(3 DOWNTO 1) := "000" -- 3 Zeros
);
PORT(
a, b: IN STD_LOGIC_VECTOR(N-1 DOWNTO 0);
control: IN STD_LOGIC_VECTOR(1 DOWNTO 0);
overflow: OUT STD_LOGIC;
zero: OUT STD_LOGIC;
cOut: OUT STD_LOGIC;
result: OUT STD_LOGIC_VECTOR(N-1 DOWNTO 0)
);
END Alu4;
ARCHITECTURE behavioral OF Alu4 IS
COMPONENT Alu1
PORT(
a: IN STD_LOGIC;
b: IN STD_LOGIC;
cIn: IN STD_LOGIC;
control: IN STD_LOGIC_VECTOR(1 DOWNTO 0);
cOut: OUT STD_LOGIC;
result: OUT STD_LOGIC
);
END COMPONENT;
SIGNAL carry_sig: STD_LOGIC_VECTOR(N DOWNTO 0); -- carry_sig(N) = MSB cOut
SIGNAL result_sig: STD_LOGIC_VECTOR(N-1 DOWNTO 0);
BEGIN
process (a, b, control)
BEGIN
case control is
WHEN "000" =>
result_sig <= a AND b;
WHEN "001" =>
result_sig <= a OR b;
WHEN "010" =>
result_sig <= carry_sig;
WHEN "011" =>
result_sig <= carry_sig;
WHEN "100" =>
result_sig <= NOT a;
WHEN "101" =>
result_sig <= a XOR b;
WHEN "110" =>
result_sig <= carry_sig;
WHEN "111" =>
result_sig <= NOT b;
WHEN others =>
NULL;
END case;
END process;
END behavioral;
The second program:
LIBRARY IEEE;
USE IEEE.STD_LOGIC_1164.ALL;
ENTITY Bin2Hex IS
PORT(
bin: IN STD_LOGIC_VECTOR(3 DOWNTO 0); --4-bit binary inputs
hex: OUT STD_LOGIC_VECTOR(6 DOWNTO 0) --7-segment hex display
);
END Bin2Hex;
ARCHITECTURE behavioral OF Bin2Hex IS
BEGIN
WITH bin SELECT
hex <= "1000000" WHEN "0000", --0
"1111001" when "0001", --1
"0100100" when "0010", --2
"0110000" WHEN "0011", --3
"0011001" WHEN "0100", --4
"0010010" WHEN "0101", --5
"0000010" WHEN "0110", --6
"1111000" WHEN "0111", --7
"0000000" WHEN "1000", --8
"0010000" WHEN "1001", --9
"0001000" WHEN "1010", --A
"0000011" WHEN "1011", --b
"0100110" WHEN "1100", --C
"0100001" WHEN "1101", --d
"0000110" WHEN "1110", --E
"0001110" WHEN "1111", --F
"1111111" when others;
END behavioral;
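The port map fails for several reasons at once: VHDL does not allow positional associations after a named one in the same list, the named association is written backwards (the formal port name goes on the left, and Alu4 has no port called control_sig), the last two positional actuals are in the wrong order for the component (cOut comes before result in the declaration), and # is not a VHDL comment (comments start with --). A sketch of the instantiation using named association throughout:
-- Named association: formal (component port) on the left, actual (local signal) on the right.
U1: Alu4 PORT MAP (
    a        => a,
    b        => b,
    control  => control,
    overflow => overflow_sig,
    zero     => zero_sig,
    cOut     => carry_sig,
    result   => result_sig
);
As posted, overflow_sig, zero_sig, carry_sig and result_sig are not used anywhere else, so led15/led16/led17, an and the Bin2Hex instance still need to be driven from them. Separately, once the port map compiles, Alu4's case statement will also error out, because the 2-bit control is compared against 3-bit choices such as "000".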

I am getting the wrong signal for one CLK period in my waveform

I have this schematic and have to write a structural VHDL design for it.
These are my components:
MUX:
library IEEE;
use IEEE.STD_LOGIC_1164.all;
entity mux is
port(
A : in STD_LOGIC_VECTOR(7 downto 0);
B : in STD_LOGIC_VECTOR(7 downto 0);
Sel : in bit;
Z : out STD_LOGIC_VECTOR(7 downto 0)
);
end mux;
architecture Beh of mux is
begin
Z <= A when Sel = '1' else B;
end Beh;
REG:
library IEEE;
use IEEE.STD_LOGIC_1164.all;
entity reg is
port(
C : STD_LOGIC;
LD : in bit;
Reg_in : in STD_LOGIC_VECTOR(7 downto 0);
R_out : out STD_LOGIC_VECTOR(7 downto 0)
);
end reg;
architecture Beh of reg is
begin
process (C)
begin
if (rising_edge (C)) then
if (LD = '1') then
R_out <= Reg_in;
end if;
end if;
end process;
end Beh;
TOP:
library IEEE;
use IEEE.STD_LOGIC_1164.all;
entity top is
port(
LDA, LDB, S1, S0 : in bit;
CLK : in STD_LOGIC;
X, Y : in STD_LOGIC_VECTOR(7 downto 0);
RB : out STD_LOGIC_VECTOR(7 downto 0)
);
end top;
architecture Beh of top is
signal regB_out : STD_LOGIC_VECTOR(7 downto 0);
signal regA_out : STD_LOGIC_VECTOR(7 downto 0);
signal mux1_res : STD_LOGIC_VECTOR(7 downto 0);
signal mux2_res : STD_LOGIC_VECTOR(7 downto 0);
begin
Mux1: entity mux(Beh)
port map
(
A => X,
B => regB_out,
Sel => S1,
Z => mux1_res
);
RegA: entity reg(Beh)
port map
(
LD => LDA,
C => CLK,
Reg_in => mux1_res,
R_out => regA_out
);
Mux2: entity mux(Beh)
port map
(
A => regA_out,
B => Y,
Sel => S0,
Z => mux2_res
);
RB<=regB_out;
RegB: entity reg(Beh)
port map
(
LD => LDB,
C => CLK,
Reg_in => mux2_res,
R_out => regB_out
);
end Beh;
I am not sure I connected RB, regB_out and B correctly. Also, in the waveform, when S1 and S0 both equal 0, I get nonsense on RB for one CLK period; for example, on the screenshot at 600 ns the '01' on RB shouldn't be there. Can someone help me find my mistakes?
TestBench:
library IEEE;
use IEEE.std_logic_1164.all;
use IEEE.std_logic_signed.all;
use IEEE.numeric_std.all;
ENTITY tbt is
END tbt;
ARCHITECTURE behavior OF tbt IS
COMPONENT TOP
PORT (
CLK : in STD_LOGIC;
LDA, LDB, S1, S0 : in bit;
X, Y : in STD_LOGIC_VECTOR(7 downto 0);
RB : out STD_LOGIC_VECTOR(7 downto 0)
);
END COMPONENT;
signal CLK_sig : std_logic;
signal LDA_sig, LDB_sig, S1_sig, S0_sig : bit :='0';
signal X_sig, Y_sig, RB_sig : std_logic_vector(7 downto 0):="00000000";
constant CLK_period : time := 100 ns;
constant s_per : time := 50 ns;
begin
-------------------------------------------------------------
uut: TOP PORT MAP (
CLK => CLK_sig,
LDA => LDA_sig,
LDB => LDB_sig,
S1 => S1_sig,
S0 => S0_sig,
X => X_sig,
Y => Y_sig,
RB=> RB_sig
);
-------------------------------------------------------------
CLK_process :process
begin
CLK_sig <= '0';
wait for CLK_period/2;
CLK_sig <= '1';
wait for CLK_period/2;
end process;
-------------------------------------------------------------
stim_proc: process
variable itertion_backwards : integer := 255;
variable itertion_forward : integer := 0;
begin
wait for CLK_period;
for itertion_forward in 0 to 254 loop
X_sig <= STD_LOGIC_VECTOR(TO_SIGNED(INTEGER(itertion_forward),8));
Y_sig <= STD_LOGIC_VECTOR(TO_SIGNED(INTEGER(itertion_backwards),8));
wait for CLK_period;
S1_sig<= not S1_sig;
wait for CLK_period;
S0_sig<= not S0_sig;
wait for CLK_period;
LDA_sig<= not LDA_sig;
wait for CLK_period;
LDB_sig<= not LDB_sig;
itertion_backwards := itertion_backwards - 1;
end loop;
wait;
end process;
end;
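For what it's worth, the RB <= regB_out binding looks fine, and feeding regB_out back into the B input of Mux1 is exactly what the structural description should do. Two effects can look like one clock period of nonsense: the registers have no reset, so R_out (and therefore RB) keeps its old or undefined value until the first rising edge at which LD = '1', and whatever a register captures at an edge persists for a full period before the next load can replace it, so RB always lags the mux inputs by one clock. A sketch of reg with an asynchronous reset (the reset port is my assumption, it is not in the original schematic), which at least gives RB a defined value from time zero and makes the waveform easier to read:
library IEEE;
use IEEE.STD_LOGIC_1164.all;
entity reg is
    port(
        C      : in  STD_LOGIC;
        RST    : in  STD_LOGIC; -- assumed asynchronous reset, not in the original schematic
        LD     : in  bit;
        Reg_in : in  STD_LOGIC_VECTOR(7 downto 0);
        R_out  : out STD_LOGIC_VECTOR(7 downto 0)
    );
end reg;
architecture Beh of reg is
begin
    process (C, RST)
    begin
        if RST = '1' then
            R_out <= (others => '0'); -- defined value from time zero
        elsif rising_edge(C) then
            if LD = '1' then
                R_out <= Reg_in;      -- new value appears one edge after LD is raised
            end if;
        end if;
    end process;
end Beh;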

LFSR doesn't generate random values during simulation

I am new to VHDL, but I have some idea of what I'm doing. I made this LFSR, but I don't know why it gets stuck alternating between the initial seed value and one other XORed value.
I am working with Altera Quartus 16 Lite and ISim.
library ieee;
use ieee.std_logic_1164.all;
--creating a galois LFSR
entity LFSR is
port (
clk : in std_logic;
rst : in std_logic;
en : in std_logic;
rdm_out : out std_logic_vector(15 downto 0);
rdm_out_a : out std_logic_vector(7 downto 0);
rdm_out_b : out std_logic_vector(7 downto 0);
lfsr_Done : out std_logic --lfsr done
);
end entity LFSR;
architecture behavioral of LFSR is
signal temp_out : std_logic_vector(15 downto 0) := (0 => '1' ,others => '0'); --initial value as seed
signal temp_done : std_logic;
begin
process (clk, rst)
begin
if rising_edge (clk) then --module operates only when enabled
if (rst = '1') then
temp_out <= (0 => '1' ,others => '0');
temp_done <= '0';
elsif (en = '1') then
temp_out <= temp_out(15 downto 11) & (temp_out(10) xor temp_out(0)) & temp_out(9 downto 5) & (temp_out(4) xor temp_out(0)) & temp_out(3 downto 0);
--temp_out <= (temp_out(15) xor temp_out(0)) & (temp_out(14) xor temp_out(0)) & temp_out(13) & (temp_out(12) xor temp_out(0)) & temp_out(11 downto 4) & (temp_out(3) xor temp_out(0)) & temp_out(2 downto 0);
temp_done <= '1';
end if;
end if;
end process;
rdm_out <= temp_out(15 downto 0);
rdm_out_a <= temp_out(15 downto 8);
rdm_out_b <= temp_out(7 downto 0);
lfsr_Done <= temp_done;
end architecture behavioral;
The commented-out temp_out line is the actual feedback (the taps are 16, 15, 13, and 4); I also checked with random taps, but there was still no improvement.
And the testbench I used is this:
library ieee;
use ieee.std_logic_1164.all;
entity lfsr_tb is
end lfsr_tb;
architecture test_bench of lfsr_tb is
component LFSR
port (
clk : in std_logic;
rst : in std_logic;
en : in std_logic;
rdm_out : out std_logic_vector(15 downto 0);
rdm_out_a : out std_logic_vector(7 downto 0);
rdm_out_b : out std_logic_vector(7 downto 0);
lfsr_Done : out std_logic );
end component;
signal clk1: std_logic;
signal rst1: std_logic;
signal en1 : std_logic;
signal rdm_out1 : std_logic_vector(15 downto 0);
signal rdm_out_a1 : std_logic_vector(7 downto 0);
signal rdm_out_b1 : std_logic_vector(7 downto 0);
signal lfsr_Done1 : std_logic ;
begin
mapping: LFSR port map(
clk => clk1,
rst => rst1,
en => en1,
rdm_out => rdm_out1,
rdm_out_a => rdm_out_a1,
rdm_out_b => rdm_out_b1,
lfsr_Done => lfsr_Done1 );
clock: process
begin
clk1 <= '0'; wait for 10 ps;
clk1 <= '1'; wait for 10 ps;
end process;
reset: process
begin
rst1 <= '1'; wait for 10 ps;
rst1 <= '0';
en1 <= '1'; wait for 800 ps;
end process;
end test_bench;
This is the result I am getting:
Yes, it was not shifting, but this one is working now:
temp_out(15) <= temp_out(0);-- shifting bit
temp_out(14) <= temp_out(15);
temp_out(13) <= temp_out(14) xor temp_out(0);
temp_out(12) <= temp_out(13) xor temp_out(0);
temp_out(11) <= temp_out(12);
temp_out(10) <= temp_out(11) xor temp_out(0);
temp_out(9 downto 0) <= temp_out(10 downto 1);
Hope it helps others. Thanks, guys!
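For anyone comparing against the original line: the first version XORed the tap bits in place without shifting, which is why it only toggled between the seed and one other value. The working per-bit assignments above can also be written as a single concatenation, equivalent bit for bit (a sketch of the same update):
-- Right-shifting Galois LFSR update written as one concatenation
-- (equivalent to the seven per-bit assignments above).
temp_out <= temp_out(0)
          & temp_out(15)
          & (temp_out(14) xor temp_out(0))
          & (temp_out(13) xor temp_out(0))
          & temp_out(12)
          & (temp_out(11) xor temp_out(0))
          & temp_out(10 downto 1);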

Simple VHDL ALU will not show inputs or overflow in the waveform

I'm supposed to write a 16-bit ALU. My professor wants us to code the adder and subtractor of the ALU with a
signal tmp : std_logic_vector(16 downto 0); and then, in the case branch for the select input s, put:
tmp <= conv_std_logic_vector(conv_integer(a) + conv_integer(b), 17);
After experimenting with it for a while, my waveform only shows the inputs' values as UUUUUUUUUUUUUUUU, even after I commented out the conv_std_logic_vector(...) part.
Is there a simple explanation as to why my inputs aren't showing up in the waveform?
Here is my code:
-- 16-Bit ALU
-- By: Logan Jordon
library ieee;
use ieee.std_logic_1164.all;
use ieee.std_logic_unsigned.all;
use IEEE.NUMERIC_STD.ALL;
--use ieee.std_logic_arith.all;
 
entity alu16 is
port (
a : in std_logic_vector(15 downto 0);
b : in std_logic_vector(15 downto 0);
s : in std_logic_vector(1 downto 0);
r : out std_logic_vector(15 downto 0);
cout : out std_logic;
lt, eq, gt : out std_logic;
overflow : out std_logic
);
end entity alu16;
architecture beh of alu16 is
signal tmp : std_logic_vector(16 downto 0);
signal add_overflow : std_logic;
signal sub_overflow : std_logic;
begin
-- PROCESS
process(a, b, add_overflow, sub_overflow)
begin
case s is
--ADD
when "00" =>
--tmp <= conv_std_logic_vector(conv_integer(a) + conv_integer(b), 17);
tmp <= a + b;
overflow <= add_overflow;
--SUB
when "01" =>
--tmp <= conv_std_logic_vector(conv_integer(a) - conv_integer(b), 17);
tmp <= a - b;
overflow <= sub_overflow;
--AND
when "10" =>
tmp <= '0' & a AND b;
overflow <= '0';
--OR
when "11" =>
tmp <= '0' & a OR b;
overflow <= '0';
when others =>
tmp <= "00000000000000000";
end case;
--One-Bitters
if a > b then
gt <= '1';
lt <= '0';
eq <= '0';
elsif a < b then
lt <= '1';
gt <= '0';
eq <= '0';
elsif a = b then
eq <= '1';
lt <= '0';
gt <= '0';
end if;
end process;
--OUTPUTS
cout <= tmp(16);
r <= tmp(15 downto 0);
add_overflow <= '1' when (a(15) = b(15)) and (a(15) /= tmp(15))
else '0';
sub_overflow <= '1' when (a(15) = NOT b(15)) and (a(15) /= tmp(15))
else '0';
end beh;
EDIT: In case the problem is my testbench, here is its code:
library ieee;
use ieee.std_logic_1164.all;
use ieee.std_logic_unsigned.all;
use IEEE.NUMERIC_STD.ALL;
entity alu16_tb is
end alu16_tb;
architecture behavior of alu16_tb is
component ALU16
port(
a : in std_logic_vector(15 downto 0);
b : in std_logic_vector(15 downto 0);
s : in std_logic_vector(1 downto 0);
r : out std_logic_vector(15 downto 0);
cout : out std_logic;
lt, eq, gt : out std_logic;
overflow : out std_logic
);
end component;
-- Signals to interface with the UUT
-- Set each of the input vectors to unique values to avoid
-- needing a process to drive them below
signal a : std_logic_vector(15 downto 0) := "0000000000000000";
signal b : std_logic_vector(15 downto 0) := "0000000000000000";
signal s : std_logic_vector(1 downto 0) := "00";
signal r : std_logic_vector(15 downto 0):= "0000000000000000";
signal cout : std_logic := '0';
signal lt : std_logic := '0';
signal gt : std_logic := '0';
signal eq : std_logic := '0';
signal overflow : std_logic := '0';
constant tick : time := 10 ns;
begin
-- Instantiate the Unit Under Test (UUT)
uut : ALU16 port map (
a => a,
b => b,
s => s,
r => r,
cout => cout,
lt => lt,
gt => gt,
eq => eq,
overflow => overflow
);
-- Drive selector bits
drive_s : process
begin
a <= "0000000000000001";
b <= "0000000000000010";
wait for (tick*2);
s <= "00";
wait for (tick*2);
s <= "01";
wait for (tick*2);
s <= "10";
wait for (tick*2);
s <= "11";
end process drive_s;
end;
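I can't tell from the code alone why a and b themselves show up as U in the wave window (the testbench does drive them from time zero), but a few things in the process will bite in simulation either way: s is missing from the sensitivity list, so after the first run the case is never re-evaluated when only s changes; a + b is a 16-bit result assigned to the 17-bit tmp; and '0' & a AND b parses as ('0' & a) AND b, because logical operators bind more loosely than "&". A sketch of the process with those lined up, still using the std_logic_unsigned operators the file already imports (the gt/lt/eq comparison is unchanged and omitted here):
-- Sketch: widths lined up and s added to the sensitivity list.
process(a, b, s, add_overflow, sub_overflow)
begin
    case s is
        when "00" =>                      -- ADD
            tmp <= ('0' & a) + ('0' & b); -- 17-bit sum, carry lands in tmp(16)
            overflow <= add_overflow;
        when "01" =>                      -- SUB
            tmp <= ('0' & a) - ('0' & b);
            overflow <= sub_overflow;
        when "10" =>                      -- AND
            tmp <= '0' & (a and b);       -- parentheses: "&" binds tighter than "and"
            overflow <= '0';
        when "11" =>                      -- OR
            tmp <= '0' & (a or b);
            overflow <= '0';
        when others =>
            tmp <= (others => '0');
            overflow <= '0';
    end case;
    -- gt/lt/eq comparisons unchanged (omitted)
end process;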

How to deal with signed numbers correctly and still use "+"

library IEEE;
use IEEE.STD_LOGIC_1164.ALL;
use IEEE.NUMERIC_STD.ALL;
entity alu_16 is
Port ( a : in STD_LOGIC_VECTOR(15 downto 0);
b : in STD_LOGIC_VECTOR(15 downto 0);
sel : in STD_LOGIC_VECTOR (1 downto 0);
gt : out STD_LOGIC;
lt : out STD_LOGIC;
eq : out STD_LOGIC;
result : out SIGNED(15 downto 0);
overflow : out STD_LOGIC;
cout : in STD_LOGIC);
end alu_16;
architecture Behavioral of alu_16 is
signal inter_res : SIGNED(16 downto 0);
signal subtraction : SIGNED(16 downto 0);
signal addition : SIGNED (16 downto 0);
signal carry_in : STD_LOGIC;
signal carry_out : STD_LOGIC;
signal msb_bit_add : STD_LOGIC;
begin
gt <= '1' when a > b else '0';
lt <= '1' when a < b else '0';
eq <= '1' when a = b else '0';
subtraction <= signed(a) - signed(b);
addition <= signed(a) + signed(b);
with sel select
inter_res <= addition when "00",
subtraction when "01",
signed(a) AND signed(b) when "10",
signed(a) OR signed(b) when others;
carry_out <= inter_res(16);
msb_bit_add <= std_logic(a(15) + b(15));
carry_in <= msb_bit_add XOR inter_res(15);
overflow <= NOT(carry_in XOR carry_out);
result <= inter_res(15 downto 0);
end Behavioral;
So, I'm trying to make a 16-bit signed adder without using a ripple-carry adder. However, I am getting errors about overloading the "+" operator on the one-bit add for msb_bit_add. Can anyone shed some light on what I should do on that line?
Thanks!
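The error is because numeric_std does not define "+" for two plain STD_LOGIC bits. For a single bit, the sum (ignoring the carry) is just an XOR, so that line needs no adder at all:
-- One-bit sum of the sign bits: the sum output of a half adder is simply XOR,
-- so no "+" overload is needed here.
msb_bit_add <= a(15) xor b(15);
As a side note (not what the current error is about): addition, subtraction and inter_res are 17 bits wide while signed(a) + signed(b) is only 16 bits, so length mismatches are likely next; resize(signed(a), 17) + resize(signed(b), 17) keeps the extra bit you read as inter_res(16), and the AND/OR branches of the selected assignment need the same widening.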

Resources