Why is Kafka message delivery from Producer to Consumer so slow? - go

We have multiple microservices written in Go exchanging messages over the Kafka message bus. A microservice writes to a Kafka topic with a partition count of 3 and a replication factor of 2. We use AWS MSK as the Kafka broker and the Shopify sarama client to connect to it.
Here is my Producer code -
package kf

import (
    "fmt"
    "net"
    "strconv"

    "github.com/Shopify/sarama"
    "github.com/segmentio/kafka-go"
)

type Producer struct {
    flowEventProducer sarama.SyncProducer
    topic             string
}

func InitProducer(brokers []string, topic string) *Producer {
    CreateKafkaTopic(brokers[0], topic)
    p := &Producer{}
    prod, err := newFlowWriter(brokers)
    if err != nil {
        panic("failed to connect to producer")
    }
    p.flowEventProducer = prod
    p.topic = topic
    return p
}

func CreateKafkaTopic(kafkaURL, topic string) {
    conn, err := kafka.Dial("tcp", kafkaURL)
    if err != nil {
        panic(err.Error())
    }
    defer conn.Close()
    controller, err := conn.Controller()
    if err != nil {
        panic(err.Error())
    }
    controllerConn, err := kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
    if err != nil {
        panic(err.Error())
    }
    defer controllerConn.Close()
    topicConfigs := []kafka.TopicConfig{
        {
            Topic:             topic,
            NumPartitions:     3,
            ReplicationFactor: 2,
        },
    }
    err = controllerConn.CreateTopics(topicConfigs...)
    if err != nil {
        panic(err.Error())
    }
}

func newFlowWriter(brokers []string) (sarama.SyncProducer, error) {
    config := sarama.NewConfig()
    version := "2.6.2"
    kafkaVer, err := sarama.ParseKafkaVersion(version)
    if err != nil {
        panic("failed to parse kafka version, producer will not run")
    }
    config.Producer.Partitioner = sarama.NewHashPartitioner
    config.Net.MaxOpenRequests = 10
    config.Producer.RequiredAcks = sarama.WaitForLocal
    config.Producer.Return.Successes = true
    config.Version = kafkaVer
    producer, err := sarama.NewSyncProducer(brokers, config)
    return producer, err
}

func (p *Producer) WriteMessage(uuid string, data []byte) error {
    msg := &sarama.ProducerMessage{
        Topic: p.topic,
        Key:   sarama.ByteEncoder(uuid),
        Value: sarama.ByteEncoder(data),
    }
    part, off, err := p.flowEventProducer.SendMessage(msg)
    if err != nil {
        return err
    }
    fmt.Printf("message written on partition %d at offset %d\n", part, off)
    return nil
}
Here is my consumer -
package kf

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/Shopify/sarama"
)

type Consumer struct {
    flowEventReader sarama.ConsumerGroup
    topic           string
    brokerUrls      []string
}

type data struct {
    Name     string `json:"name"`
    Employee string `json:"employee"`
}

func InitConsumer(brokers []string, topic string) *Consumer {
    c := &Consumer{}
    c.topic = topic
    c.brokerUrls = brokers
    conf := createSaramaKafkaConf()
    var err error
    c.flowEventReader, err = sarama.NewConsumerGroup(c.brokerUrls, "myconf", conf)
    if err != nil {
        panic("failed to create consumer group on kafka cluster")
    }
    return c
}

type KafkaConsumerGroupHandler struct {
    Cons *Consumer
}

func (c *Consumer) HandleMessages() {
    // Consume from kafka and process; Consume returns after every
    // rebalance, so it has to be called in a loop.
    for {
        err := c.flowEventReader.Consume(context.Background(), []string{c.topic}, &KafkaConsumerGroupHandler{Cons: c})
        if err != nil {
            fmt.Println("consume failed:", err)
            continue
        }
    }
}

func (*KafkaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error   { return nil }
func (*KafkaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }

func (l *KafkaConsumerGroupHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    for msg := range claim.Messages() {
        l.Cons.logMessage(msg)
        sess.MarkMessage(msg, "")
    }
    return nil
}

func (c *Consumer) logMessage(msg *sarama.ConsumerMessage) {
    d := &data{}
    err := json.Unmarshal(msg.Value, d)
    if err != nil {
        fmt.Println(err)
    }
    fmt.Printf("message: key: %s and val: %+v\n", string(msg.Key), d)
}

func createSaramaKafkaConf() *sarama.Config {
    conf := sarama.NewConfig()
    version := "2.6.2"
    kafkaVer, err := sarama.ParseKafkaVersion(version)
    if err != nil {
        panic("failed to parse kafka version, executor will not run")
    }
    conf.Version = kafkaVer
    conf.Consumer.Offsets.Initial = sarama.OffsetOldest
    conf.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.BalanceStrategyRoundRobin}
    return conf
}
When we put load on the microservices and the producer produces on the order of 500 events, each ~1 KB in size, we encounter a delay of 30 seconds in message delivery. We want near-instant delivery after a message is produced, and I think Kafka is more than capable of that for my use case. Please help me figure out the cause of this delay.
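Not a fix by itself, but a way to localize the delay: a minimal sketch, assuming the producer/consumer setup above, that stamps every message with its produce time so the consumer can log the end-to-end latency (and compare it against the broker-assigned msg.Timestamp). The helper names and the "produced-at" header are illustrative, not part of the original code.

package kf

import (
    "fmt"
    "time"

    "github.com/Shopify/sarama"
)

const latencyHeader = "produced-at" // illustrative header name

// StampedMessage builds a ProducerMessage that carries its send time as a
// record header (record headers require Kafka >= 0.11, satisfied here).
func StampedMessage(topic, key string, value []byte) *sarama.ProducerMessage {
    return &sarama.ProducerMessage{
        Topic: topic,
        Key:   sarama.ByteEncoder(key),
        Value: sarama.ByteEncoder(value),
        Headers: []sarama.RecordHeader{{
            Key:   []byte(latencyHeader),
            Value: []byte(time.Now().UTC().Format(time.RFC3339Nano)),
        }},
    }
}

// LogLatency, called from ConsumeClaim, reports how long a message spent
// between SendMessage and consumption; msg.Timestamp (set broker-side) is
// printed as a cross-check against clock skew between services.
func LogLatency(msg *sarama.ConsumerMessage) {
    for _, h := range msg.Headers {
        if string(h.Key) == latencyHeader {
            if t, err := time.Parse(time.RFC3339Nano, string(h.Value)); err == nil {
                fmt.Printf("end-to-end latency: %v (broker ts: %v)\n", time.Since(t), msg.Timestamp)
            }
        }
    }
}

If the gap sits between SendMessage returning and the broker timestamp, look at the producer/broker side; if it sits between the broker timestamp and consumption, look at the consumer group (for example, rebalancing on startup or slow handlers).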

Related

XACK is not deleting the message, even though it was processed successfully?

I am trying to implement a Redis stream, where we have a producer.
package producer

import (
    "RedisStream/models"
    "encoding/json"
    "fmt"

    "github.com/garyburd/redigo/redis"
)

type Producer struct {
    streamName string
}

func NewProducer(streamName string) *Producer {
    return &Producer{streamName: streamName}
}

func (p *Producer) WriteEvents(conn redis.Conn, key string) {
    // Create a new struct
    employee := models.Employee{
        Name:     "ashutosh",
        Employer: "self-employee",
    }
    // Convert struct to JSON
    e, _ := json.Marshal(employee)
    // Send key and value to Redis stream
    _, err := conn.Do("XADD", p.streamName, "*", key, e)
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("Successfully sent data to Redis stream")
}
Then I have implemented a consumer:
func (c *Consumer) ReadEventsCons1() {
    // Connect to Redis
    conn, err := redis.Dial("tcp", ":6379")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer conn.Close()
    for {
        // Read key and value from Redis stream
        reply, err := conn.Do("XREADGROUP", "GROUP", c.groupName[0], "ashu", "COUNT", "1", "STREAMS", c.streamName, ">")
        vs, err := redis.Values(reply, err)
        if err != nil {
            if errors.Is(err, redis.ErrNil) {
                continue
            }
            fmt.Printf("Error: %+v", err)
        }
        // Get the first and only value in the array since we're only
        // reading from one stream "some-stream-name" here.
        vs, err = redis.Values(vs[0], nil)
        if err != nil {
            fmt.Printf("Error: %+v", err)
        }
        // Ignore the stream name as the first value as we already have
        // that in hand! Just get the second value which is guaranteed to
        // exist per the docs, and parse it as some stream entries.
        res, err := entries(vs[1], nil)
        if err != nil {
            // fmt.Errorf only builds an error; print it instead of discarding it
            fmt.Printf("error parsing entries: %v\n", err)
        }
        for _, val := range res {
            for k, v := range val.Fields {
                empl := &models.Employee{}
                _ = json.Unmarshal(v, empl)
                fmt.Printf("From Consumer Ashu: Key: %s and val: %+v \n", k, empl)
            }
            reply, err := redis.Int(conn.Do("XACK", c.streamName, c.groupName[0], val.ID))
            if reply != 1 {
                fmt.Printf("failed to ack: err: %+v", err)
            }
        }
    }
}
Once a consumer from a consumer group has successfully processed a message, I send an acknowledgement to Redis, but the messages still reside in the Redis stream: after running
XLEN streamName
I can see the length keeps growing. This may create a memory problem, since messages reside there in perpetuity. Is there an intelligent way to handle this issue?
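XACK only removes an entry from the consumer group's pending entries list; the entry itself stays in the stream until it is removed with XDEL or the stream is capped with XTRIM (or XADD with a MAXLEN option). A minimal redigo sketch of both, assuming already-acknowledged entries are safe to discard (i.e. no other consumer group still needs them):

package consumer

import (
    "fmt"

    "github.com/garyburd/redigo/redis"
)

// AckAndDelete acknowledges a processed entry and then removes it from
// the stream entirely, so XLEN no longer grows without bound.
func AckAndDelete(conn redis.Conn, stream, group, id string) error {
    if _, err := redis.Int(conn.Do("XACK", stream, group, id)); err != nil {
        return fmt.Errorf("ack %s: %v", id, err)
    }
    if _, err := conn.Do("XDEL", stream, id); err != nil {
        return fmt.Errorf("del %s: %v", id, err)
    }
    return nil
}

// CapStream trims the stream to roughly maxLen entries; "~" lets Redis
// trim lazily, which is cheaper than an exact MAXLEN.
func CapStream(conn redis.Conn, stream string, maxLen int) error {
    _, err := conn.Do("XTRIM", stream, "MAXLEN", "~", maxLen)
    return err
}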

Sarama Kafka consumergroup function return

I am very new to Go and am attempting to make some adjustments to an open source library that consumes messages from Kafka using the Sarama library. The original code can be found here.
The original package implements a PartitionConsumer that works just fine if one doesn't need read consistency across multiple consumers consuming the same topic; however, that does not work for me.
I have done some work within the same application to implement the sarama NewConsumerGroup package, using some examples I have found online.
Below is the code I currently have running:
package main

import (
    "context"
    // "flag"
    "encoding/json"
    "log"
    "os"
    "os/signal"
    "strings"
    "sync"
    "syscall"

    "github.com/Shopify/sarama"
    // "github.com/Shopify/sarama/mocks"
)

// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
    config *KafkaConfig
    // consumers []sarama.PartitionConsumer
    messages chan *sarama.ConsumerMessage
}

var (
    brokers  = ""
    version  = ""
    group    = ""
    topics   = ""
    assignor = ""
    oldest   = true
    verbose  = false
)

// Consumer represents a Sarama consumer group consumer
type Consumer struct {
    ready chan bool
}

// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    /**
     * Construct a new Sarama configuration.
     * The Kafka cluster version has to be defined before the consumer/producer is initialized.
     */
    c := sarama.NewConfig()
    // Configuration options go here
    log.Println("Starting a new Sarama consumer")
    if verbose {
        sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
    }
    version, err := sarama.ParseKafkaVersion("2.1.1")
    if err != nil {
        log.Panicf("Error parsing Kafka version: %v", err)
    }
    c.Version = version
    if oldest {
        c.Consumer.Offsets.Initial = sarama.OffsetOldest
    }
    /**
     * Setup a new Sarama consumer group
     */
    consumer := Consumer{ready: make(chan bool)}
    ctx, cancel := context.WithCancel(context.Background())
    client, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
    if err != nil {
        log.Panicf("Error creating consumer group client: %v", err)
    }
    wg := &sync.WaitGroup{}
    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            if err := client.Consume(ctx, []string{config.topic}, &consumer); err != nil {
                log.Panicf("Error from consumer: %v", err)
            }
            // check if context was cancelled, signaling that the consumer should stop
            if ctx.Err() != nil {
                return
            }
            consumer.ready = make(chan bool)
        }
    }()
    <-consumer.ready // Await till the consumer has been set up
    log.Println("Sarama consumer up and running!...")
    sigterm := make(chan os.Signal, 1)
    signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
    select {
    case <-ctx.Done():
        log.Println("terminating: context cancelled")
    case <-sigterm:
        log.Println("terminating: via signal")
    }
    cancel()
    wg.Wait()
    if err = client.Close(); err != nil {
        log.Panicf("Error closing client: %v", err)
    }
    i := &KafkaInput{
        config: config,
        // consumers: make([]sarama.PartitionConsumer, len(partitions)),
        // messages: make(chan *sarama.ConsumerMessage, 256),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    return i
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    // NOTE:
    // Do not move the code below to a goroutine.
    // The `ConsumeClaim` itself is called within a goroutine, see:
    // https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
    for message := range claim.Messages() {
        log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
        session.MarkMessage(message, "")
    }
    return nil
}

// ErrorHandler should receive errors
func (i *KafkaInput) ErrorHandler(consumer sarama.PartitionConsumer) {
    for err := range consumer.Errors() {
        log.Println("Failed to read access log entry:", err)
    }
}

// Read Comment
func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    json.Unmarshal(message.Value, &kafkaMessage)
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}

func (i *KafkaInput) String() string {
    return "Kafka Input: " + i.config.host + "/" + i.config.topic
}

// Setup is run at the beginning of a new session, before ConsumeClaim
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
    // Mark the consumer as ready
    close(consumer.ready)
    return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
    return nil
}
The KafkaConfig carries the groupID and topic for the consumer. When I run this program, the consumer fires up, reads from the proper topic using the correct group, and prints the messages to STDOUT using the ConsumeClaim created in this function:
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
    for message := range claim.Messages() {
        log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
        session.MarkMessage(message, "")
    }
    return nil
}
What I believe I need, however, is for the NewKafkaInput function to return *KafkaInput with the messages from the claim added to the struct (forgive me if I am using the wrong terminology here, this is my first Go rodeo).
    ...
    i := &KafkaInput{
        config: config,
        // consumers: make([]sarama.PartitionConsumer, len(partitions)),
        // messages: make(chan *sarama.ConsumerMessage, 256),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    return i
}
In the original example that is done here:
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    ...
    go func(consumer sarama.PartitionConsumer) {
        defer consumer.Close()
        for message := range consumer.Messages() {
            i.messages <- message
        }
    }(consumer)
    ...
}
I have spent days toying around with moving functions in and out of the NewKafkaInput function, attempting to add messages to the KafkaInput struct outside the function, and everything in between. I just can't get it to work. The NewKafkaInput function needs to return the *KafkaInput with any messages so that this function can complete:
func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    json.Unmarshal(message.Value, &kafkaMessage)
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}
It's entirely possible I have made a complete mess of this thing as well, but any help and input is appreciated.
Thanks.
Here is the solution to my problem. I had goroutines blocking the main function(s), and they needed to be broken out. If the code below doesn't make any sense, here is a link to the program I was modifying: https://github.com/buger/goreplay. If I can get a response from the owner, I plan on cleaning up the code and submitting a pull request, or possibly publishing a fork.
package main

import (
    "context"
    "encoding/json"
    "log"
    "os"
    "strings"

    "github.com/Shopify/sarama"
)

// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
    sarama.ConsumerGroup
    config   *KafkaConfig
    consumer Consumer
    messages chan *sarama.ConsumerMessage
}

// Consumer represents a Sarama consumer group consumer
type Consumer struct {
    ready    chan bool
    messages chan *sarama.ConsumerMessage
}

var (
    brokers  = ""
    version  = ""
    group    = ""
    topics   = ""
    assignor = ""
    oldest   = true
    verbose  = false
)

// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
    /**
     * Construct a new Sarama configuration.
     * The Kafka cluster version has to be defined before the consumer/producer is initialized.
     */
    c := sarama.NewConfig()
    // Configuration options go here
    log.Printf("KafkaConfig: %s", config.host)
    log.Printf("KafkaConfig: %s", config.group)
    log.Printf("KafkaConfig: %s", config.topic)
    log.Println("Starting a new Sarama consumer")
    if verbose {
        sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
    }
    version, err := sarama.ParseKafkaVersion("2.1.1")
    if err != nil {
        log.Panicf("Error parsing Kafka version: %v", err)
    }
    c.Version = version
    if oldest {
        c.Consumer.Offsets.Initial = sarama.OffsetOldest
    }
    group, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
    if err != nil {
        // this error was silently dropped in the original version
        log.Panicf("Error creating consumer group client: %v", err)
    }
    /**
     * Setup a new Sarama consumer group
     */
    consumer := Consumer{
        ready:    make(chan bool),
        messages: make(chan *sarama.ConsumerMessage, 256),
    }
    i := &KafkaInput{
        ConsumerGroup: group,
        config:        config,
        messages:      make(chan *sarama.ConsumerMessage, 256),
        consumer:      consumer,
    }
    go i.loop([]string{config.topic})
    // point Read at the channel that ConsumeClaim pushes into
    i.messages = consumer.messages
    return i
}

// ConsumeClaim and stuff
func (i *KafkaInput) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error {
    for msg := range c.Messages() {
        s.MarkMessage(msg, "")
        i.Push(msg)
    }
    return nil
}

func (i *KafkaInput) loop(topic []string) {
    ctx := context.Background()
    for {
        if err := i.Consume(ctx, []string{i.config.topic}, i); err != nil {
            return
        }
    }
}

// Push Messages
func (i *KafkaInput) Push(m *sarama.ConsumerMessage) {
    if i.consumer.messages != nil {
        log.Printf("MSGPUSH: %s", m)
        i.consumer.messages <- m
    }
}

func (i *KafkaInput) Read(data []byte) (int, error) {
    message := <-i.messages
    log.Printf("Msg: %s", string(message.Value))
    if !i.config.useJSON {
        copy(data, message.Value)
        return len(message.Value), nil
    }
    var kafkaMessage KafkaMessage
    json.Unmarshal(message.Value, &kafkaMessage)
    buf, err := kafkaMessage.Dump()
    if err != nil {
        log.Println("Failed to decode access log entry:", err)
        return 0, err
    }
    copy(data, buf)
    return len(buf), nil
}

func (i *KafkaInput) String() string {
    return "Kafka Input: " + i.config.host + "/" + i.config.topic
}

// Setup is run at the beginning of a new session, before ConsumeClaim
func (i *KafkaInput) Setup(s sarama.ConsumerGroupSession) error {
    return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (i *KafkaInput) Cleanup(s sarama.ConsumerGroupSession) error {
    return nil
}

How do I send Protobuf Messages via a Kafka Producer

I am using the Sarama library to send messages through a producer.
This allows me to send strings; my goal is to send Protobuf messages.
msg := &sarama.ProducerMessage{
    Topic: *topic,
    Value: sarama.StringEncoder(content),
}
This is a sample proto message that I have:
message Pixel {
    // Session identifier stuff
    int64 timestamp = 1;    // Milliseconds from the epoch
    string session_id = 2;  // Unique Identifier... for parent level0top
    string client_name = 3; // Client-name/I-key
    string ip = 10;
    repeated string ip_list = 11;
    string datacenter = 12;
    string proxy_type = 13;
}
Can you please provide an example of how I can send Protobuf messages?
You need to use proto.Marshal and sarama.ByteEncoder on the producer side and proto.Unmarshal on the consumer side.
Producer:
pixelToSend := &pixel.Pixel{SessionId: t.String()}
pixelToSendBytes, err := proto.Marshal(pixelToSend)
if err != nil {
    log.Fatalln("Failed to marshal pixel:", err)
}
msg := &sarama.ProducerMessage{
    Topic: topic,
    Value: sarama.ByteEncoder(pixelToSendBytes),
}
Consumer:
receivedPixel := &pixel.Pixel{}
err := proto.Unmarshal(msg.Value, receivedPixel)
if err != nil {
    log.Fatalln("Failed to unmarshal pixel:", err)
}
log.Printf("Pixel received: %s", receivedPixel)
Complete example:
package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"
    "time"

    pixel "example/pixel"

    "github.com/Shopify/sarama"
    "github.com/golang/protobuf/proto"
)

func main() {
    topic := "your-topic-name"
    brokerList := []string{"localhost:29092"}

    producer, err := newSyncProducer(brokerList)
    if err != nil {
        log.Fatalln("Failed to start Sarama producer:", err)
    }

    go func() {
        ticker := time.NewTicker(time.Second)
        for {
            select {
            case t := <-ticker.C:
                pixelToSend := &pixel.Pixel{SessionId: t.String()}
                pixelToSendBytes, err := proto.Marshal(pixelToSend)
                if err != nil {
                    log.Fatalln("Failed to marshal pixel:", err)
                }
                msg := &sarama.ProducerMessage{
                    Topic: topic,
                    Value: sarama.ByteEncoder(pixelToSendBytes),
                }
                // SendMessage returns partition/offset/err; don't drop the error
                if _, _, err := producer.SendMessage(msg); err != nil {
                    log.Println("Failed to send pixel:", err)
                }
                log.Printf("Pixel sent: %s", pixelToSend)
            }
        }
    }()

    signals := make(chan os.Signal, 1)
    signal.Notify(signals, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)

    partitionConsumer, err := newPartitionConsumer(brokerList, topic)
    if err != nil {
        log.Fatalln("Failed to create Sarama partition consumer:", err)
    }

    log.Println("Waiting for messages...")
    for {
        select {
        case msg := <-partitionConsumer.Messages():
            receivedPixel := &pixel.Pixel{}
            err := proto.Unmarshal(msg.Value, receivedPixel)
            if err != nil {
                log.Fatalln("Failed to unmarshal pixel:", err)
            }
            log.Printf("Pixel received: %s", receivedPixel)
        case <-signals:
            log.Print("Received termination signal. Exiting.")
            return
        }
    }
}

func newSyncProducer(brokerList []string) (sarama.SyncProducer, error) {
    config := sarama.NewConfig()
    config.Producer.Return.Successes = true
    // TODO configure producer
    producer, err := sarama.NewSyncProducer(brokerList, config)
    if err != nil {
        return nil, err
    }
    return producer, nil
}

func newPartitionConsumer(brokerList []string, topic string) (sarama.PartitionConsumer, error) {
    conf := sarama.NewConfig()
    // TODO configure consumer
    consumer, err := sarama.NewConsumer(brokerList, conf)
    if err != nil {
        return nil, err
    }
    partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetOldest)
    if err != nil {
        return nil, err
    }
    return partitionConsumer, nil
}

emersion/go-imap - Mark message as seen

I'm trying to mark messages as seen using this IMAP protocol implementation, but it's not working as intended.
I have a function that prints unseen messages, and my intention is that, at the end, it marks each message as seen.
package main

import (
    "emailmonitor/util"
)

func main() {
    serverGmail := util.NewServerGmail()
    serverGmail.Connect()
    serverGmail.Login()
    serverGmail.ListUnseenMessages()
}
//-----------------------------------------
package util

import (
    "log"

    imap "github.com/emersion/go-imap"
    "github.com/emersion/go-imap/client"
    // mail.CreateReader and the header helpers below come from go-message,
    // not net/mail; the io/ioutil and net/smtp imports were unused
    "github.com/emersion/go-message/mail"
)

type ServerGmail struct {
    user    string
    pass    string
    erro    string
    cliente *client.Client
}

func NewServerGmail() *ServerGmail {
    serverGmail := &ServerGmail{}
    serverGmail.user = "xxxxxx#gmail.com"
    serverGmail.pass = "xxxxx"
    serverGmail.erro = ""
    return serverGmail
}

func (serverGmail *ServerGmail) Connect() {
    // Connect to server
    cliente, erro := client.DialTLS("smtp.gmail.com:993", nil)
    if erro != nil {
        serverGmail.erro = erro.Error()
    }
    log.Println("Connected")
    serverGmail.cliente = cliente
}

func (serverGmail *ServerGmail) Login() {
    // Login
    if erro := serverGmail.cliente.Login(serverGmail.user, serverGmail.pass); erro != nil {
        serverGmail.erro = erro.Error()
    }
    log.Println("Logged")
}

func (serverGmail *ServerGmail) setLabelBox(label string) *imap.MailboxStatus {
    mailbox, erro := serverGmail.cliente.Select(label, true)
    if erro != nil {
        serverGmail.erro = erro.Error()
    }
    return mailbox
}

func (serverGmail *ServerGmail) ListUnseenMessages() {
    // set mailbox to INBOX
    serverGmail.setLabelBox("INBOX")
    // criteria to search for unseen messages
    criteria := imap.NewSearchCriteria()
    criteria.WithoutFlags = []string{"\\Seen"}
    uids, err := serverGmail.cliente.UidSearch(criteria)
    if err != nil {
        log.Println(err)
    }
    seqSet := new(imap.SeqSet)
    seqSet.AddNum(uids...)
    section := &imap.BodySectionName{}
    items := []imap.FetchItem{imap.FetchEnvelope, imap.FetchFlags, imap.FetchInternalDate, section.FetchItem()}
    messages := make(chan *imap.Message)
    go func() {
        if err := serverGmail.cliente.UidFetch(seqSet, items, messages); err != nil {
            log.Fatal(err)
        }
    }()
    for message := range messages {
        // nil-check before dereferencing the message
        if message == nil {
            log.Fatal("Server didn't return a message")
        }
        log.Println(message.Uid)
        r := message.GetBody(section)
        if r == nil {
            log.Fatal("Server didn't return a message body")
        }
        // Create a new mail reader
        mr, err := mail.CreateReader(r)
        if err != nil {
            log.Fatal(err)
        }
        // Print some info about the message
        header := mr.Header
        if date, err := header.Date(); err == nil {
            log.Println("Date:", date)
        }
        if from, err := header.AddressList("From"); err == nil {
            log.Println("From:", from)
        }
        if to, err := header.AddressList("To"); err == nil {
            log.Println("To:", to)
        }
        if subject, err := header.Subject(); err == nil {
            log.Println("Subject:", subject)
        }
        // MARK "SEEN" ------- STARTS HERE ---------
        seqSet.Clear()
        seqSet.AddNum(message.Uid)
        item := imap.FormatFlagsOp(imap.AddFlags, true)
        flags := []interface{}{imap.SeenFlag}
        erro := serverGmail.cliente.UidStore(seqSet, item, flags, nil)
        if erro != nil {
            panic("error!")
        }
    }
}
Link from Documentation: https://godoc.org/github.com/emersion/go-imap/client#Client.UidStore
I tried to do something similar to the Store example.
What can be done to fix it?
Modify the following line by changing true to false:
mailbox, erro := serverGmail.cliente.Select(label, true)
Once you've done this, when a message is fetched (using UidFetch), it will automatically be marked as "Seen".
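In other words, the mailbox must be selected read-write: with readOnly set to true the selection behaves like EXAMINE, and the server ignores flag changes. A sketch of the corrected method from the code above:

func (serverGmail *ServerGmail) setLabelBox(label string) *imap.MailboxStatus {
    // readOnly=false selects the mailbox read-write, so fetches can set
    // \Seen and the UidStore flag updates are honored.
    mailbox, erro := serverGmail.cliente.Select(label, false)
    if erro != nil {
        serverGmail.erro = erro.Error()
    }
    return mailbox
}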

Golang Gorilla Websocket stops receiving information at 120 seconds

I'm currently trying to connect to the CEX.IO bitcoin exchange's websocket, but I have been having issues not only with CEX.IO but with others too. All of my connections drop around the 120-second mark, which makes me think there is some TTL problem going on. The Process() goroutine in the main package ends up just hanging, waiting for data from the ReadLoop, which simply stops receiving data. I've included some read-only API keys in the code so you can test if you'd like.
package main

import (
    "encoding/json"
    "fmt"
    "time"

    "bitbucket.org/tradedefender/cryptocurrency/exchange-connector/cexio"
    "github.com/shopspring/decimal"
)

type OrderBook struct {
    Asks []Ask
    Bids []Bid
}

type Ask struct {
    Rate   decimal.Decimal
    Amount decimal.Decimal
}

type Bid struct {
    Rate   decimal.Decimal
    Amount decimal.Decimal
}

func main() {
    cexioConn := new(cexio.Connection)
    err := cexioConn.Connect()
    if err != nil {
        // fmt.Errorf only builds an error; print it instead of discarding it
        fmt.Printf("error: %s\n", err.Error())
    }
    err = cexioConn.Authenticate("TLwYkktLf7Im6nqSKt6UO1IrU", "9ImOJcR7Qj3LMIyPCzky0D7WE")
    if err != nil {
        fmt.Printf("error: %s\n", err.Error())
    }
    readChannel := make(chan cexio.IntraAppMessage, 25)
    go cexioConn.ReadLoop(readChannel)
    processor := Processor{
        WatchPairs: [][2]string{
            {"BTC", "USD"},
        },
        conn: cexioConn,
    }
    go processor.Process(readChannel)
    // LOL
    for {
        continue
    }
}

type Processor struct {
    WatchPairs [][2]string
    conn       *cexio.Connection
}

func (p *Processor) Process(ch <-chan cexio.IntraAppMessage) {
    p.conn.SubscribeToOrderBook(p.WatchPairs[0])
    pingTimer := time.Now().Unix()
    for {
        fmt.Printf("(%v)\n", time.Now().Unix())
        if (time.Now().Unix() - pingTimer) >= 10 {
            fmt.Println("sending ping")
            p.conn.SendPing()
            pingTimer = time.Now().Unix()
        }
        readMsg := <-ch
        output, _ := json.Marshal(readMsg.SocketMessage)
        fmt.Println(string(output))
        if readMsg.SocketMessage.Event == "ping" {
            fmt.Println("sending pong")
            p.conn.SendPong()
            pingTimer = time.Now().Unix()
        }
    }
}
Below is the connector to the cexio websocket. Here is a link to their API: https://cex.io/websocket-api
package cexio

import (
    "bytes"
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "strconv"
    "time"

    "github.com/gorilla/websocket"
    //"github.com/shopspring/decimal"
    "github.com/satori/go.uuid"
)

const Url = "wss://ws.cex.io/ws/"

type Connection struct {
    conn *websocket.Conn
}

type IntraAppMessage struct {
    SocketMessage  GenericMessage
    ProgramMessage ProgramMessage
}

type GenericMessage struct {
    Event string      `json:"e"`
    Data  interface{} `json:"data"`
    Auth  AuthData    `json:"auth,omitempty"`
    Ok    string      `json:"ok,omitempty"`
    Oid   string      `json:"oid,omitempty"`
    Time  int64       `json:"time,omitempty"`
}

type ProgramMessage struct {
    Error string
}

type AuthData struct {
    Key       string `json:"key"`
    Signature string `json:"signature"`
    Timestamp int64  `json:"timestamp"`
}

type OrderBookSubscribeData struct {
    Pair      [2]string `json:"pair"`
    Subscribe bool      `json:"subscribe"`
    Depth     int       `json:"depth"`
}

func (c *Connection) SendPong() error {
    pongMsg := GenericMessage{
        Event: "pong",
    }
    err := c.conn.WriteJSON(pongMsg)
    if err != nil {
        return err // was `return nil`, which swallowed the write error
    }
    deadline := time.Now().Add(15 * time.Second)
    err = c.conn.WriteControl(websocket.PongMessage, nil, deadline)
    if err != nil {
        return err
    }
    return nil
}

func (c *Connection) SendPing() error {
    pingMsg := GenericMessage{
        Event: "get-balance",
        Oid:   uuid.NewV4().String(),
    }
    err := c.conn.WriteJSON(pingMsg)
    if err != nil {
        return err
    }
    deadline := time.Now().Add(15 * time.Second)
    err = c.conn.WriteControl(websocket.PingMessage, nil, deadline)
    if err != nil {
        return err
    }
    return nil
}

func (c *Connection) Connect() error {
    dialer := *websocket.DefaultDialer
    wsConn, _, err := dialer.Dial(Url, nil)
    if err != nil {
        return err
    }
    c.conn = wsConn
    //c.conn.SetPingHandler(c.HandlePing)
    for {
        _, msgBytes, err := c.conn.ReadMessage()
        if err != nil {
            c.Disconnect()
            return err
        }
        fmt.Println(string(msgBytes))
        var m GenericMessage
        err = json.Unmarshal(msgBytes, &m)
        if err != nil {
            c.Disconnect()
            return err
        }
        if m.Event != "connected" {
            c.Disconnect()
            // err is nil here, so report the unexpected event instead
            return fmt.Errorf("unexpected event %q while connecting", m.Event)
        }
        break
    }
    return nil
}

func (c *Connection) Disconnect() error {
    return c.conn.Close()
}

func (c *Connection) ReadLoop(ch chan<- IntraAppMessage) {
    for {
        fmt.Println("starting new read")
        _, msgBytes, err := c.conn.ReadMessage()
        if err != nil {
            ch <- IntraAppMessage{
                ProgramMessage: ProgramMessage{
                    Error: err.Error(),
                },
            }
            continue
        }
        var m GenericMessage
        err = json.Unmarshal(msgBytes, &m)
        if err != nil {
            ch <- IntraAppMessage{
                ProgramMessage: ProgramMessage{
                    Error: err.Error(),
                },
            }
            continue
        }
        ch <- IntraAppMessage{
            SocketMessage: m,
        }
    }
}

func CreateSignature(timestamp int64, key, secret string) string {
    secretBytes := []byte(secret)
    h := hmac.New(sha256.New, secretBytes)
    var buffer bytes.Buffer
    buffer.WriteString(strconv.FormatInt(timestamp, 10))
    buffer.WriteString(key)
    h.Write(buffer.Bytes())
    return hex.EncodeToString(h.Sum(nil))
}

func (c *Connection) Authenticate(key, secret string) error {
    timestamp := time.Now().Unix()
    signature := CreateSignature(timestamp, key, secret)
    var authMsg GenericMessage
    authMsg.Event = "auth"
    authMsg.Auth = AuthData{
        Key:       key,
        Signature: signature,
        Timestamp: timestamp,
    }
    err := c.conn.WriteJSON(authMsg)
    if err != nil {
        return err
    }
    for {
        _, msgBytes, err := c.conn.ReadMessage()
        if err != nil {
            c.Disconnect()
            return err
        }
        fmt.Println(string(msgBytes))
        var m GenericMessage
        err = json.Unmarshal(msgBytes, &m)
        if err != nil {
            c.Disconnect()
            return err
        }
        if m.Event != "auth" && m.Ok != "ok" {
            c.Disconnect()
            // err is nil here, so report the failed auth explicitly
            return fmt.Errorf("authentication failed: event %q, ok %q", m.Event, m.Ok)
        }
        break
    }
    return nil
}

func (c *Connection) SubscribeToOrderBook(pair [2]string) error {
    sendMsg := GenericMessage{
        Event: "order-book-subscribe",
        Data: OrderBookSubscribeData{
            Pair:      pair,
            Subscribe: true,
            Depth:     0,
        },
        Oid: uuid.NewV4().String(),
    }
    err := c.conn.WriteJSON(sendMsg)
    if err != nil {
        return err
    }
    return nil
}

func (c *Connection) GetBalance() error {
    sendMsg := GenericMessage{
        Event: "get-balance",
        Oid:   uuid.NewV4().String(),
    }
    err := c.conn.WriteJSON(sendMsg)
    if err != nil {
        return err
    }
    return nil
}
The solution was to remove the
for {
    continue
}
at the end of the main function.
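A tight for { continue } loop keeps one core spinning, and on Go versions before 1.14 a loop with no function calls can never be preempted, which can starve the other goroutines (including the websocket read loop). A sketch of a blocking alternative for the end of main, assuming the same goroutine setup as above:

package main

import (
    "os"
    "os/signal"
    "syscall"
)

func main() {
    // ... start the ReadLoop and Process goroutines as before ...

    // Block the main goroutine without burning CPU; a bare `select {}`
    // also works if the process should never exit on its own.
    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
    <-sigs
}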
