I'm learning how to use timing simulation in Quartus II to see the real delays in a circuit, and an error has occurred. The error says that I'm not respecting the hold time of a flip-flop. In the logic (functional) simulation the circuit works.
Below you can see the code:
module AddTestParalellIf(clk, reset, sum, out);
    input clk, reset;
    output [15:0] sum;
    output out;
    reg [15:0] sum;
    reg out;

    always @(posedge clk) begin
        if (reset) begin
            sum = 0;
            out = 0;
        end
        else
            if (sum == 16'b0000000010000010)
                out = 1;
            sum = sum + 1;
    end
endmodule
And the error:
# Time: 0 ps Iteration: 0 Instance: /AddTestParalellIf_vlg_vec_tst File: plataformadetestes.vt
# ** Error: c:/altera/13.0/modelsim_ase/win32aloem/../altera/verilog/src/cycloneii_atoms.v(5351): $hold( posedge clk &&& nosloadsclr:27871 ps, datain:27922 ps, 286 ps );
# Time: 27922 ps Iteration: 0 Instance: /AddTestParalellIf_vlg_vec_tst/i1/\sum[1]~reg0
# ERROR! Vector Mismatch for output port out :: #time = 1000000.000 ps
# Expected value = 0
# Real value = x
# ERROR! Vector Mismatch for output port sum[1] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[2] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[3] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[4] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[5] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[6] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[7] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[8] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[9] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[10] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[11] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[12] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[13] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[14] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[15] :: #time = 1000000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx0
# ERROR! Vector Mismatch for output port sum[0] :: #time = 1005000.000 ps
# Expected value = 0000000000000000
# Real value = xxxxxxxxxxxxxxx1
# 17 mismatched vectors : Simulation failed !
# ** Note: $finish : plataformadetestes.vt(463)
# Time: 10 us Iteration: 0 Instance: /AddTestParalellIf_vlg_vec_tst/tb_out
I'm using the ModelSim simulator with Quartus II Web Edition.
Try adding a begin/end around the two statements in your else clause. As written, if reset is asserted you appear to be clearing and incrementing sum in the same clock cycle.
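In code, the suggested change looks like this (a sketch of just the always block; without the begin/end, only the inner if belongs to the else, so the increment runs even during reset):

always @(posedge clk) begin
    if (reset) begin
        sum = 0;   // nonblocking assignments (<=) are the usual style for clocked logic
        out = 0;
    end
    else begin
        if (sum == 16'b0000000010000010)
            out = 1;
        sum = sum + 1;  // now runs only when reset is deasserted
    end
end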
On a Raspberry Pi Pico board, I connected the sensor's 3V3 to the Pico's 3V3, GND to the Pico's GND, and DAT to GP16, then ran the code below in Thonny (MicroPython).
temp_hum_DHT_code.py:
from machine import Pin
import time
from dht import DHT11, InvalidChecksum

sensor = DHT11(Pin(16, Pin.OUT, Pin.PULL_DOWN))

while True:
    temp = sensor.temperature
    humidity = sensor.humidity
    print("Temperature: {}°C Humidity: {:.0f}% ".format(temp, humidity))
    time.sleep(2)
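As a side note, InvalidChecksum is imported above but never caught; here is a sketch (not from the original post) of a loop that skips the occasional corrupted transfer instead of crashing:

while True:
    try:
        temp = sensor.temperature
        humidity = sensor.humidity
        print("Temperature: {}°C Humidity: {:.0f}% ".format(temp, humidity))
    except InvalidChecksum:
        # Bit-banged DHT11 reads fail occasionally; drop this sample and retry.
        print("Checksum error, retrying")
    time.sleep(2)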
dht.py:
import array
import micropython
import utime
from machine import Pin
from micropython import const


class InvalidChecksum(Exception):
    pass


class InvalidPulseCount(Exception):
    pass


MAX_UNCHANGED = const(100)
MIN_INTERVAL_US = const(200000)
HIGH_LEVEL = const(50)
EXPECTED_PULSES = const(84)


class DHT11:
    _temperature: float
    _humidity: float

    def __init__(self, pin):
        self._pin = pin
        self._last_measure = utime.ticks_us()
        self._temperature = -1
        self._humidity = -1

    def measure(self):
        current_ticks = utime.ticks_us()
        if utime.ticks_diff(current_ticks, self._last_measure) < MIN_INTERVAL_US and (
            self._temperature > -1 or self._humidity > -1
        ):
            # Less than a second since last read, which is too soon according
            # to the datasheet
            return

        self._send_init_signal()
        pulses = self._capture_pulses()
        buffer = self._convert_pulses_to_buffer(pulses)
        self._verify_checksum(buffer)

        self._humidity = buffer[0] + buffer[1] / 10
        self._temperature = buffer[2] + buffer[3] / 10
        self._last_measure = utime.ticks_us()

    @property
    def humidity(self):
        self.measure()
        return self._humidity

    @property
    def temperature(self):
        self.measure()
        return self._temperature

    def _send_init_signal(self):
        self._pin.init(Pin.OUT, Pin.PULL_DOWN)
        self._pin.value(1)
        utime.sleep_ms(50)
        self._pin.value(0)
        utime.sleep_ms(18)

    @micropython.native
    def _capture_pulses(self):
        pin = self._pin
        pin.init(Pin.IN, Pin.PULL_UP)

        val = 1
        idx = 0
        transitions = bytearray(EXPECTED_PULSES)
        unchanged = 0
        timestamp = utime.ticks_us()

        while unchanged < MAX_UNCHANGED:
            if val != pin.value():
                if idx >= EXPECTED_PULSES:
                    raise InvalidPulseCount(
                        "Got more than {} pulses".format(EXPECTED_PULSES)
                    )
                now = utime.ticks_us()
                transitions[idx] = now - timestamp
                timestamp = now
                idx += 1

                val = 1 - val
                unchanged = 0
            else:
                unchanged += 1
        pin.init(Pin.OUT, Pin.PULL_DOWN)
        if idx != EXPECTED_PULSES:
            raise InvalidPulseCount(
                "Expected {} but got {} pulses".format(EXPECTED_PULSES, idx)
            )
        return transitions[4:]

    def _convert_pulses_to_buffer(self, pulses):
        """Convert a list of 80 pulses into a 5 byte buffer
        The resulting 5 bytes in the buffer will be:
        0: Integral relative humidity data
        1: Decimal relative humidity data
        2: Integral temperature data
        3: Decimal temperature data
        4: Checksum
        """
        # Convert the pulses to 40 bits
        binary = 0
        for idx in range(0, len(pulses), 2):
            binary = binary << 1 | int(pulses[idx] > HIGH_LEVEL)

        # Split into 5 bytes
        buffer = array.array("B")
        for shift in range(4, -1, -1):
            buffer.append(binary >> shift * 8 & 0xFF)
        return buffer

    def _verify_checksum(self, buffer):
        # Calculate checksum
        checksum = 0
        for buf in buffer[0:4]:
            checksum += buf
        if checksum & 0xFF != buffer[4]:
            raise InvalidChecksum()
I have the binary clock script below. The code works fine, but I want to ensure the clock syncs every few hours or so. I realize that the code takes the system time, but I would prefer it to reach out to an NTP server to get/correct the time. Can someone please assist?
#!/usr/bin/env python3
'''A binary clock: displays the current time (HHMMSS) in binary-coded decimal.'''
import time
from itertools import zip_longest
from gpiozero import LED


def main():
    #             H8             M8             S8
    leds = [None, LED(14), None, LED(24), None, LED(12),
            # H40     H4       M40      M4       S40      S4
            LED(22), LED(15), LED(11), LED(25), LED(13), LED(16),
            # H20     H2       M20      M2       S20      S2
            LED(10), LED(18), LED(5), LED(8), LED(19), LED(20),
            # H10     H1       M10      M1       S10      S1
            LED(9), LED(23), LED(6), LED(7), LED(26), LED(21)]
    try:
        while True:
            t = time.strftime('%H%M%S')
            print(t)
            b = bcd(t)
            s = vertical_strings(b)
            print(s + '\n')
            light(s, leds)
            time.sleep(0.2)
    except KeyboardInterrupt:
        print('done')


# bcd :: iterable(characters '0'-'9') -> [str]
def bcd(digits):
    'Convert a string of decimal digits to binary-coded-decimal.'
    def bcdigit(d):
        'Convert a decimal digit to BCD (4 bits wide).'
        # [2:] strips the '0b' prefix added by bin().
        return bin(d)[2:].rjust(4, '0')
    return (bcdigit(int(d)) for d in digits)


# vertical_strings :: iterable(str) -> str
def vertical_strings(strings):
    'Orient an iterable of strings vertically: one string per column.'
    iters = [iter(s) for s in strings]
    concat = ''.join
    return ''.join(map(concat,
                       zip_longest(*iters, fillvalue=' ')))


def light(strings, leds):
    x = 0
    for l in strings:
        if l == '1' and leds[x] is not None:
            leds[x].on()
        elif l == '0' and leds[x] is not None:
            leds[x].off()
        x = x + 1


if __name__ == '__main__':
    main()
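A minimal sketch of one way to add the requested NTP correction, assuming the third-party ntplib package (pip install ntplib): keep an offset against the system clock, refresh it every few hours, and format the corrected time:

import time
import ntplib  # third-party: pip install ntplib

NTP_SERVER = 'pool.ntp.org'
SYNC_INTERVAL = 4 * 60 * 60  # re-sync every 4 hours
_offset = 0.0                # seconds to add to the system clock
_last_sync = 0.0

def corrected_hhmmss():
    'HHMMSS string from the NTP-corrected clock, re-syncing as needed.'
    global _offset, _last_sync
    if time.time() - _last_sync > SYNC_INTERVAL:
        try:
            _offset = ntplib.NTPClient().request(NTP_SERVER, version=3, timeout=5).offset
        except Exception as e:
            print('NTP sync failed, keeping old offset:', e)
        _last_sync = time.time()
    return time.strftime('%H%M%S', time.localtime(time.time() + _offset))

In main(), t = corrected_hhmmss() would replace t = time.strftime('%H%M%S'). Alternatively, letting the OS NTP daemon (e.g. systemd-timesyncd on Raspberry Pi OS) keep the system clock synced requires no code change at all.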
Note: the code is cross-compiled on Windows 10.
Code:
package main

import (
	"fmt"
	"log"

	aosong "github.com/d2r2/go-aosong"
	i2c "github.com/d2r2/go-i2c"
)

const i2CAddress = 0x5c
const i2CBus = 1

// Server struct
type Server struct {
	Sensor *aosong.Sensor
	I2C    *i2c.I2C
}

func main() {
	var err error
	s := Server{Sensor: aosong.NewSensor(aosong.AM2320)}
	s.I2C, err = i2c.NewI2C(i2CAddress, i2CBus)
	if err != nil {
		log.Printf(err.Error())
	}
	defer s.I2C.Close()
	fmt.Println(s.Sensor.ReadRelativeHumidityAndTemperature(s.I2C))
}
Debug info:
2019-02-12T10:29:19.692 [ i2c] DEBUG Write 3 hex bytes: [030004]
2019-02-12T10:29:19.697 [ i2c] DEBUG Read 8 hex bytes: [0304012500d92045]
2019-02-12T10:29:19.698 [ i2c] DEBUG Read 8 hex bytes: [0000000000000000]
CRCs doesn't match: CRC from sensor(0) != calculated CRC(6912).
Any idea why the CRC from the sensor is 0?
I am able to read the sensor on the same bus, with the same address, using a Python script.
#!/usr/bin/python
import posix
from fcntl import ioctl
import time


class AM2320:
    I2C_ADDR = 0x5c
    I2C_SLAVE = 0x0703

    def __init__(self, i2cbus=1):
        self._i2cbus = i2cbus

    @staticmethod
    def _calc_crc16(data):
        crc = 0xFFFF
        for x in data:
            crc = crc ^ x
            for bit in range(0, 8):
                if (crc & 0x0001) == 0x0001:
                    crc >>= 1
                    crc ^= 0xA001
                else:
                    crc >>= 1
        return crc

    @staticmethod
    def _combine_bytes(msb, lsb):
        return msb << 8 | lsb

    def readSensor(self):
        fd = posix.open("/dev/i2c-%d" % self._i2cbus, posix.O_RDWR)
        ioctl(fd, self.I2C_SLAVE, self.I2C_ADDR)

        # wake AM2320 up, goes to sleep to not warm up and affect the humidity sensor
        # This write will fail as AM2320 won't ACK this write
        try:
            posix.write(fd, b'\x00')
        except:
            pass
        time.sleep(0.001)  # Wait at least 0.8ms, at most 3ms

        # write at addr 0x03, start reg = 0x00, num regs = 0x04
        posix.write(fd, b'\x03\x00\x04')
        time.sleep(0.0016)  # Wait at least 1.5ms for result

        # Read out 8 bytes of result data
        # Byte 0: Should be Modbus function code 0x03
        # Byte 1: Should be number of registers to read (0x04)
        # Byte 2: Humidity msb
        # Byte 3: Humidity lsb
        # Byte 4: Temperature msb
        # Byte 5: Temperature lsb
        # Byte 6: CRC lsb byte
        # Byte 7: CRC msb byte
        data = bytearray(posix.read(fd, 8))

        # Check data[0] and data[1]
        if data[0] != 0x03 or data[1] != 0x04:
            raise Exception("First two read bytes are a mismatch")
        # CRC check
        if self._calc_crc16(data[0:6]) != self._combine_bytes(data[7], data[6]):
            raise Exception("CRC failed")

        # Temperature resolution is 16 bits; if the highest bit (Bit15) is 1
        # the temperature is negative, otherwise positive. Bits 14..0 hold the
        # sensor value, which is 10 times the actual temperature.
        temp = self._combine_bytes(data[4], data[5])
        if temp & 0x8000:
            temp = -(temp & 0x7FFF)
        temp /= 10.0

        humi = self._combine_bytes(data[2], data[3]) / 10.0

        return (temp, humi)


am2320 = AM2320(1)
(t, h) = am2320.readSensor()
print t, h
It seems there was an issue with the library itself: it was making two reads, but because the read command was only sent once, the second read came back as all zeros, as can be seen in the logs:
2019-02-12T10:29:19.692 [ i2c] DEBUG Write 3 hex bytes: [030004]
2019-02-12T10:29:19.697 [ i2c] DEBUG Read 8 hex bytes: [0304012500d92045] (first read, which was ignored)
2019-02-12T10:29:19.698 [ i2c] DEBUG Read 8 hex bytes: [0000000000000000] (second read, which came back as 0)
CRCs doesn't match: CRC from sensor(0) != calculated CRC(6912)
Made a PR to fix the issue: https://github.com/d2r2/go-aosong/pull/3
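For reference, a sketch of the read sequence the Python script performs, written against the same go-i2c WriteBytes/ReadBytes API (assumes the imports above plus "time"); the key point is a single command write followed by exactly one 8-byte read:

// readAM2320 wakes the sensor, sends the read command once, then reads once.
func readAM2320(bus *i2c.I2C) ([]byte, error) {
	bus.WriteBytes([]byte{0x00}) // wake-up; the sensor won't ACK, so the error is ignored
	time.Sleep(1 * time.Millisecond)
	// function 0x03 (read registers), start reg 0x00, 4 registers
	if _, err := bus.WriteBytes([]byte{0x03, 0x00, 0x04}); err != nil {
		return nil, err
	}
	time.Sleep(2 * time.Millisecond) // wait >= 1.5 ms for the conversion
	data := make([]byte, 8)
	if _, err := bus.ReadBytes(data); err != nil {
		return nil, err
	}
	return data, nil // bytes 6 and 7 carry the CRC16 (lsb, msb)
}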
I built a 5-layer neural network using TensorFlow.
I have a problem getting reproducible (stable) results.
I found similar questions regarding TensorFlow reproducibility and the corresponding answers, such as How to get stable results with TensorFlow, setting random seed, but the problem is not solved yet.
I also set the random seed as follows:
tf.set_random_seed(1)
Furthermore, I added a seed option to every random function, such as:
b1 = tf.Variable(tf.random_normal([nHidden1], seed=1234))
I confirmed that the first epoch shows identical results, but from the second epoch on the results gradually diverge.
How can I get reproducible results?
Am I missing something?
Here is the code I use.
def xavier_init(n_inputs, n_outputs, uniform=True):
    if uniform:
        init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range, seed=1234)
    else:
        stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev, seed=1234)

import numpy as np
import tensorflow as tf
import dataSetup
from scipy.stats.stats import pearsonr

tf.set_random_seed(1)

x_train, y_train, x_test, y_test = dataSetup.input_data()

# Parameters
learningRate = 0.01
trainingEpochs = 1000000
batchSize = 64
displayStep = 100
thresholdReduce = 1e-6
thresholdNow = 0.6
#dropoutRate = tf.constant(0.7)

# Network Parameters
nHidden1 = 128  # number of 1st layer nodes
nHidden2 = 64   # number of 2nd layer nodes
nInput = 24
nOutput = 1     # Predicted score: 1 output for regression

# save parameter
modelPath = 'model/model_layer5_%d_%d_mini%d_lr%.3f_noDrop_rollBack.ckpt' % (nHidden1, nHidden2, batchSize, learningRate)

# tf Graph input
X = tf.placeholder("float", [None, nInput])
Y = tf.placeholder("float", [None, nOutput])

# Weights
W1 = tf.get_variable("W1", shape=[nInput, nHidden1], initializer=xavier_init(nInput, nHidden1))
W2 = tf.get_variable("W2", shape=[nHidden1, nHidden2], initializer=xavier_init(nHidden1, nHidden2))
W3 = tf.get_variable("W3", shape=[nHidden2, nHidden2], initializer=xavier_init(nHidden2, nHidden2))
W4 = tf.get_variable("W4", shape=[nHidden2, nHidden2], initializer=xavier_init(nHidden2, nHidden2))
WFinal = tf.get_variable("WFinal", shape=[nHidden2, nOutput], initializer=xavier_init(nHidden2, nOutput))

# biases
b1 = tf.Variable(tf.random_normal([nHidden1], seed=1234))
b2 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
b3 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
b4 = tf.Variable(tf.random_normal([nHidden2], seed=1234))
bFinal = tf.Variable(tf.random_normal([nOutput], seed=1234))

# Layers
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), b1))
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), b2))
L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), b3))
L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), b4))
hypothesis = tf.add(tf.matmul(L4, WFinal), bFinal)
print "Layer setting DONE..."

# define loss and optimizer
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(cost)

# Initialize the variables
init = tf.initialize_all_variables()

# save op to save and restore all the variables
saver = tf.train.Saver()

with tf.Session() as sess:
    # initialize
    sess.run(init)
    print "Initialize DONE..."

    # Training
    costPrevious = 100000000000000.0
    best = float("INF")
    totalBatch = int(len(x_train)/batchSize)
    print "Total Batch: %d" % totalBatch

    for epoch in range(trainingEpochs):
        #print "EPOCH: %04d" % epoch
        avgCost = 0.
        for i in range(totalBatch):
            np.random.seed(i + epoch)
            randidx = np.random.randint(len(x_train), size=batchSize)
            batch_xs = x_train[randidx, :]
            batch_ys = y_train[randidx, :]

            # Fit training using batch data
            sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})

            # compute average loss
            avgCost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys}) / totalBatch

        # if the current cost is greater than the previous,
        # restore the best checkpoint and continue
        #print "Cost: %1.8f --> %1.8f at epoch %05d" % (costPrevious, avgCost, epoch+1)
        if avgCost > costPrevious + .5:
            #sess.run(init)
            load_path = saver.restore(sess, modelPath)
            print "Cost increases at the epoch %05d" % (epoch+1)
            print "Cost: %1.8f --> %1.8f" % (costPrevious, avgCost)
            continue

        costNow = avgCost
        reduceCost = abs(costPrevious - costNow)
        costPrevious = costNow

        # Display logs per epoch step
        if costNow < best:
            best = costNow
            bestMatch = sess.run(hypothesis, feed_dict={X: x_test})
            # model save
            save_path = saver.save(sess, modelPath)
        if epoch % displayStep == 0:
            print "step {}".format(epoch)
            pearson = np.corrcoef(bestMatch.flatten(), y_test.flatten())
            print 'train loss = {}, current loss = {}, test corrcoef={}'.format(best, costNow, pearson[0][1])
        if reduceCost < thresholdReduce or costNow < thresholdNow:
            print "Epoch: %04d, Cost: %.9f, Prev: %.9f, Reduce: %.9f" % (epoch+1, costNow, costPrevious, reduceCost)
            break

    print "Optimization Finished"
It seems that your results are perhaps not reproducible because you are using Saver to write/restore from a checkpoint each time (i.e., the second time you run the code, the variable values aren't initialized from your random seed; they are restored from your previous checkpoint).
Please trim down your code example to just the code necessary to reproduce irreproducibility.
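Beyond seeds and checkpoints, one more common source of drift is thread-level nondeterminism: parallel kernels may sum floats in a different order on each run, which can make results diverge from the second epoch on even with every seed fixed. A sketch of pinning TF1 to a single thread (assuming the classic tf.ConfigProto API):

import tensorflow as tf

tf.set_random_seed(1)

# One thread for both the intra-op and inter-op pools makes the reduction
# order deterministic, at the cost of slower training.
config = tf.ConfigProto(intra_op_parallelism_threads=1,
                        inter_op_parallelism_threads=1)

with tf.Session(config=config) as sess:
    sess.run(tf.initialize_all_variables())
    # ... training loop as in the question ...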
How would I make a *.bmp image using 1 bit per pixel in VB6? Does an example project exist for something like this?
'# # Image Data Info : #
'# # Each black dot is represented as binary 1 (high) #
'# # and each white dot as binary 0 (low), in the #
'# # form of hexadecimal characters. #
'# # Example : (for this example assume the image width is 8) #
'# # Data : 7E817E #
'# # Binary data : 7=0111, E=1110, 8=1000, 1=0001, #
'# # 7=0111, E=1110 #
'# # Image data : px1 px2 px3 px4 px5 px6 px7 px8 #
'# # row1 w b b b b b b w #
'# # row2 b w w w w w w b #
'# # row3 w b b b b b b w #
'# # #
'# # w = white, b = black, px = pixel #
You may use the following code. Please note that:
the image width must be a multiple of 8;
the rows start from the bottom.
If these requirements don't work for you, the code can be adjusted accordingly.
Option Explicit

Private Type BITMAPFILEHEADER
    bfType As String * 2
    bfSize As Long
    bfReserved1 As Integer
    bfReserved2 As Integer
    bfOffBits As Long
End Type

Private Type BITMAPINFOHEADER
    biSize As Long
    biWidth As Long
    biHeight As Long
    biPlanes As Integer
    biBitCount As Integer
    biCompression As Long
    biSizeImage As Long
    biXPelsPerMeter As Long
    biYPelsPerMeter As Long
    biClrUsed As Long
    biClrImportant As Long
End Type

Private Type RGBQUAD
    rgbBlue As Byte
    rgbGreen As Byte
    rgbRed As Byte
    rgbReserved As Byte
End Type

Private Type BITMAPINFO
    bmiHeader As BITMAPINFOHEADER
    bmiColors(1) As RGBQUAD
End Type

Public Function strToBmp(str As String, w As Integer, h As Integer, filename As String) As Boolean
    Dim bmfh As BITMAPFILEHEADER
    Dim bmi As BITMAPINFO
    Dim r As Boolean
    Dim ff As Integer
    Dim i As Integer
    Dim x As Integer
    Dim rl As Integer
    Dim rw As Integer
    Dim s As String
    Dim b As Byte

    ' bytes per scanline: one bit per pixel, padded to a multiple of 4 bytes
    rw = ((w + 7) \ 8 + 3) And &HFFFFFFFC

    With bmfh
        .bfType = "BM"
        .bfSize = Len(bmfh) + Len(bmi) + rw * h
        .bfOffBits = Len(bmfh) + Len(bmi)
    End With

    With bmi.bmiHeader
        .biSize = Len(bmi.bmiHeader)
        .biWidth = w
        .biHeight = h
        .biPlanes = 1
        .biBitCount = 1
        .biCompression = 0
        .biSizeImage = rw * h
        .biXPelsPerMeter = 72
        .biYPelsPerMeter = 72
        .biClrUsed = 0
        .biClrImportant = 0
    End With

    ' palette entry 0 = white (clear bit); entry 1 defaults to black (set bit)
    With bmi.bmiColors(0)
        .rgbRed = 255
        .rgbGreen = 255
        .rgbBlue = 255
    End With

    On Error Resume Next
    Call Kill(filename)
    On Error GoTo e2
    ff = FreeFile()
    Open filename For Binary Access Write As #ff
    On Error GoTo e1
    Put #ff, , bmfh
    Put #ff, , bmi

    For i = 1 To Len(str) Step 2
        b = CByte("&H" & Mid(str, i, 2))
        Put #ff, , b
        rl = rl + 1
        x = x + 8
        If x = w Then
            ' pad the row out to the full scanline width
            b = 0
            Do While rl < rw
                Put #ff, , b
                rl = rl + 1
            Loop
            x = 0
            rl = 0
        End If
    Next i

    r = True
e1:
    Close ff
e2:
    strToBmp = r
End Function

Public Sub test()
    Call strToBmp("7E817E", 8, 3, "out.bmp")
End Sub
This is the resulting image:
Please also note that Microsoft Paint seems to have a bug that affects monochrome images, resulting in the scrambling of some pixels.