Good afternoon. I am trying to solve a queue problem with Redis Streams.
Task description: we need to implement a queue to store the data that we will send to the user's email.
How it works: when I create a queue, I also create a consumer group that I will work with. Inside the queue there is also a worker that checks messages in pending status via a ticker and re-sends the data to the queue.
Problems:
CheckPending method: when I call the method to check pending messages, I get an empty response, e.g. []
How can I make CheckPending run in the background, say once every minute, to check for messages in pending status and re-add them to the stream?
Queue.go
package main
import (
"context"
"errors"
"github.com/go-redis/redis/v8"
"github.com/rs/zerolog"
"strings"
"sync"
"time"
)
// ErrNoGroup you can get this error if you forget to create the group, but you still send a request for data
var ErrNoGroup = errors.New("no group has been created")
// ErrNoStream you can get this error if you forget to create a stream, but you still send a request to retrieve data
var ErrNoStream = errors.New("no stream has been created")
type Options struct {
Name string
Redis *redis.Client
Logger *zerolog.Logger
}
type Queue struct {
Client *redis.Client
Name string
Group string
WG sync.WaitGroup
Logger *zerolog.Logger
}
const interval = 500
func New(options *Options) *Queue {
logger := options.Logger.With().Str("service", "queue").Logger()
q := &Queue{
Client: options.Redis,
Name: options.Name + "stream",
Group: options.Name + "group",
Logger: &logger,
}
// This command creates a new consumer group
err := q.Client.XGroupCreateMkStream(context.Background(), q.Name, q.Group, "0").Err()
if err != nil {
return nil
}
ctx, cancel := context.WithCancel(context.Background())
q.WG.Add(1)
go func() {
time.Sleep(60 * interval * time.Millisecond)
defer q.WG.Done()
cancel()
}()
errP := q.CheckPending(ctx)
if errP != nil {
return nil
}
return q
}
// CheckPending TODO: not found data
func (q *Queue) CheckPending(ctx context.Context) error {
l := q.methodLogger(ctx, "Queue CheckPending")
l.Debug().Msg("check pending task")
ticker := time.NewTicker(interval * time.Millisecond)
for {
select {
case <-ticker.C:
pending, err := q.Client.XPendingExt(ctx, &redis.XPendingExtArgs{
Stream: q.Name,
Group: q.Group,
Start: "-",
End: "+",
Count: 10,
}).Result()
if err != nil {
if strings.HasPrefix(err.Error(), "NOGROUP") {
return ErrNoGroup
}
if strings.HasPrefix(err.Error(), "NOSTREAM") {
return ErrNoStream
}
return err
}
if len(pending) == 0 {
return nil
}
fmt.Print(pending) // return []
break
case <-ctx.Done():
return nil
}
}
}
Main.go
func main() {
rds := redis.NewClient(&redis.Options{
Addr: ":6379",
})
logger := zerolog.New(os.Stdout)
q := queue.New(&queue.Options{Name: "email", Redis: rds, Logger: &logger})
data := map[string]interface{}{"email": "email@gmail.com", "message": "We have received your order and we are working on it."}
err := q.Add(context.Background(), data)
fmt.Print(err)
q.Read(context.TODO())
// Example
err = q.CheckPending(context.TODO()) // returns []
if err != nil {
fmt.Print(err)
}
}
In my Redis store I have data that I added three days ago and have not interacted with in any way, except that I acknowledged only some of the entries with the XACK command.
Why can't I get that data out of the store via my CheckPending method?
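Not a definitive answer, but two things worth noting. First, XPENDING only lists entries that were delivered to a consumer with XREADGROUP and not yet acknowledged; anything already passed to XACK is removed from the group's pending entries list, so it will never show up here. Second, for the "run it every minute in the background" part, a minimal sketch could look like the code below, assuming the Queue type above. It keeps ticking instead of returning when the list is empty, and it stops only when the context is cancelled. The StartPendingChecker name and the re-claim via XCLAIM are my illustrative assumptions, not part of the original code.
// StartPendingChecker is a hypothetical helper: it checks the pending entries
// list once per minute in the background until ctx is cancelled.
func (q *Queue) StartPendingChecker(ctx context.Context) {
    q.WG.Add(1)
    go func() {
        defer q.WG.Done()
        ticker := time.NewTicker(time.Minute)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                pending, err := q.Client.XPendingExt(ctx, &redis.XPendingExtArgs{
                    Stream: q.Name,
                    Group:  q.Group,
                    Start:  "-",
                    End:    "+",
                    Count:  10,
                }).Result()
                if err != nil {
                    q.Logger.Error().Err(err).Msg("xpending failed")
                    continue // keep ticking instead of returning
                }
                for _, p := range pending {
                    // Re-claim the entry for this consumer so it can be processed again.
                    _, err := q.Client.XClaim(ctx, &redis.XClaimArgs{
                        Stream:   q.Name,
                        Group:    q.Group,
                        Consumer: "pending-worker",
                        MinIdle:  time.Minute,
                        Messages: []string{p.ID},
                    }).Result()
                    if err != nil {
                        q.Logger.Error().Err(err).Msg("xclaim failed")
                    }
                }
            case <-ctx.Done():
                return
            }
        }
    }()
}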
So basically I'm writing a Go test for my chat application. For some reason, if I put the Test_saveMessage function at the top of this file, my tests run through and work fine; however, if I put Test_InitRouter at the top of the file, my server opens and the test never finishes, as if it were listening for more requests. Does anyone know why this could be happening? Here is the code that does not work:
package messenger
import (
"fmt"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"net/http/httptest"
"strings"
"testing"
)
var testMessage = Message{
Username: "Name",
Message: "Test message"}
//Tests InitRouter both sending and receiving messages
func Test_InitRouter(t *testing.T) {
var receivedMessage Message
//Create test server with the InitRouter handler
s := httptest.NewServer(InitRouter())
defer s.Close()
// Convert URL from http to ws
u := "ws" + strings.TrimPrefix(s.URL, "http")
fmt.Println(u)
// Connect to the test server
ws, _, err := websocket.DefaultDialer.Dial(u, nil)
if err != nil {
t.Fatalf("%v", err)
}
defer ws.Close()
//Send message to the server read received message and see if it's the same
if err = ws.WriteJSON(testMessage); err != nil {
t.Fatalf("%v", err)
}
err = ws.ReadJSON(&receivedMessage)
if err != nil {
t.Fatalf("%v", err)
}
if receivedMessage != testMessage {
t.Fatalf("expected %v, got %v", testMessage, receivedMessage)
}
}
//Test for the saveMessage function
func Test_saveMessage(t *testing.T) {
saveMessage(testMessage)
assert.Equal(t, 1, len(messages), "Expected to have 1 message")
}
As soon as I move the Test_saveMessage function to the top it starts working properly.
Here is the code for the handler:
package messenger
import (
"fmt"
"github.com/go-chi/chi"
"github.com/gorilla/websocket"
log "github.com/sirupsen/logrus"
"net/http"
)
func InitRouter() http.Handler {
r := chi.NewRouter()
r.Get("/", GetWebsocket)
return r
}
var clients = make(map[*websocket.Conn]bool) // connected clients
var broadcast = make(chan Message) // broadcast channel
var messages = []Message{}
func GetWebsocket(w http.ResponseWriter, r *http.Request) {
// Upgrade initial GET request to a websocket
upgrader := websocket.Upgrader{}
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Error(err)
return
}
// Close the connection when the function returns
defer ws.Close()
// Register our new client and send him the chat history
clients[ws] = true
serveInitialMessages(ws)
//initialize message sending logic
sendMessages(ws)
}
// Sends messages from a particular websocket to the channel
func sendMessages(ws *websocket.Conn){
for {
var msg Message
// Read in a new message as JSON and map it to a Message object
err := ws.ReadJSON(&msg)
if err != nil {
log.Info(err)
delete(clients, ws)
break
}
// Send the newly received message to the broadcast channel
broadcast <- msg
saveMessage(msg)
}
}
func HandleMessages() {
for {
// Grab the next message from the broadcast channel
msg := <-broadcast
fmt.Println(msg)
// Send it out to every client that is currently connected
for client := range clients {
err := client.WriteJSON(msg)
if err != nil {
log.Printf("error: %v", err)
client.Close()
delete(clients, client)
}
}
}
}
func saveMessage(m Message) {
if len(messages) >= 50 {
messages = messages[1:]
}
messages = append(messages, m)
}
func serveInitialMessages(ws *websocket.Conn) {
for _, m := range messages {
err := ws.WriteJSON(m)
if err != nil {
log.Error(err)
}
}
}
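I can only guess at the hang, but one theory fits the symptom: nothing in the test drains the unbuffered broadcast channel (HandleMessages is never started), so after the server reads the client's message it blocks on broadcast <- msg and never echoes anything back, and the test's ws.ReadJSON waits forever. When Test_saveMessage runs first, messages already holds one entry, so serveInitialMessages sends it and ReadJSON returns. A sketch of the test with the broadcaster started, assuming the package-level HandleMessages from the handler file above:
// Sketch only: start the broadcast loop so the echo path can complete.
func Test_InitRouter(t *testing.T) {
    go HandleMessages() // drain the broadcast channel and echo to clients

    var receivedMessage Message
    s := httptest.NewServer(InitRouter())
    defer s.Close()

    u := "ws" + strings.TrimPrefix(s.URL, "http")
    ws, _, err := websocket.DefaultDialer.Dial(u, nil)
    if err != nil {
        t.Fatalf("%v", err)
    }
    defer ws.Close()

    if err = ws.WriteJSON(testMessage); err != nil {
        t.Fatalf("%v", err)
    }
    if err = ws.ReadJSON(&receivedMessage); err != nil {
        t.Fatalf("%v", err)
    }
    if receivedMessage != testMessage {
        t.Fatalf("expected %v, got %v", testMessage, receivedMessage)
    }
}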
I am new to Go and Kubernetes. I tried to create a custom controller in Go using the client-go library. The controller connects to the Kubernetes API server, brings the pod details into a cache, and sends them to a workqueue, where I perform some actions on the pods. However, I want the process to be fast, and for that I need to create multiple workers. How do I create multiple workers that act upon the same workqueue and speed up the code?
Below is the sample of my controller:
package main
import (
"context"
"flag"
"fmt"
"log"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/watch"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rs "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
)
type Controller struct {
clientset kubernetes.Interface
queue workqueue.RateLimitingInterface
informer cache.SharedIndexInformer
}
var (
//used the config file
kubeconfig = flag.String("kubeconfig", "location", "absolute path to the kubeconfig file")
)
// Creating the SharedIndexInformer to bring the details into the cache
func CreateSharedIndexInformer() {
flag.Parse()
//creating config using the kubeconfig file
configuration, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
fmt.Println("Unable to find the kubeconfig file")
panic(err.Error())
}
cs, err := kubernetes.NewForConfig(configuration)
if err != nil {
panic(err.Error())
}
//Creating the queue
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
//Creating the SharedIndexInformer
informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (rs.Object, error) {
return cs.CoreV1().Pods("").List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return cs.CoreV1().Pods("").Watch(context.TODO(), options)
},
},
&v1.Pod{},
time.Second*10, // resync every 10 seconds (use 0 to disable resync)
cache.Indexers{},
)
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err == nil {
queue.Add(key)
}
},
DeleteFunc: func(obj interface{}) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
if err == nil {
queue.Add(key)
}
},
})
controller := &Controller{
clientset: cs,
queue: queue,
informer: informer,
}
stop := make(chan struct{})
go controller.Run(stop)
// Wait forever
select {}
}
func (c *Controller) Run(stopCh chan struct{}) {
// don't let panics crash the process
defer runtime.HandleCrash()
// make sure the work queue is shutdown which will trigger workers to end
defer c.queue.ShutDown()
//c.logger.Info("Starting kubewatch controller")
go c.informer.Run(stopCh)
// wait for the caches to synchronize before starting the worker
if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
//c.logger.Info("Kubewatch controller synced and ready")
// runWorker will loop until "something bad" happens. The .Until will
// then rekick the worker after one second
go wait.Until(c.runWorker, time.Second, stopCh)
<-stopCh
}
func (c *Controller) runWorker() {
// processNextWorkItem will automatically wait until there's work available
for c.processNextItem() {
// continue looping
}
}
// processNextWorkItem deals with one key off the queue. It returns false
// when it's time to quit.
func (c *Controller) processNextItem() bool {
// pull the next work item from queue. It should be a key we use to lookup
// something in a cache
key, quit := c.queue.Get()
if quit {
return false
}
// you always have to indicate to the queue that you've completed a piece of
// work
defer c.queue.Done(key)
var obj string
var ok bool
if obj, ok = key.(string); !ok {
// As the item in the workqueue is actually invalid, we call
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.queue.Forget(key)
runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return true
}
// do your work on the key.
err := c.processBusinessLogic(key.(string))
if err == nil {
// No error, tell the queue to stop tracking history
c.queue.Forget(key)
} else if c.queue.NumRequeues(key) < 10 {
//c.logger.Errorf("Error processing %s (will retry): %v", key, err)
// requeue the item to work on later
c.queue.AddRateLimited(key)
} else {
// err != nil and too many retries
//c.logger.Errorf("Error processing %s (giving up): %v", key, err)
c.queue.Forget(key)
runtime.HandleError(err)
}
return true
}
func (c *Controller) processBusinessLogic(key string) error {
obj, exists, err := c.informer.GetIndexer().GetByKey(key)
if err != nil {
glog.Errorf("Fetching object with key %s from store failed with %v", key, err)
return err
}
if !exists {
// Below we will warm up our cache with a Pod, so that we will see a delete for one pod
fmt.Printf("Pod %s does not exist anymore\n", key)
} else {
//Perform some business logic over the pods or Deployment
// Note that you also have to check the uid if you have a local controlled resource, which
// is dependent on the actual instance, to detect that a Pod was recreated with the same name
fmt.Printf("Add event for Pod %s\n", obj.(*v1.Pod).GetName())
}
return nil
}
func (c *Controller) handleErr(err error, key interface{}) {
glog.Infof("Dropping pod %q out of the queue: %v", key, err)
}
func main() {
CreateSharedIndexInformer()
}
You can just add more workers in your Run function, as follows:
func (c *Controller) Run(stopCh chan struct{}) {
...
// runWorker will loop until "something bad" happens. The .Until will
// then rekick the worker after one second
for i := 0; i < 5; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
<-stopCh
}
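The workqueue from client-go is safe for concurrent Get/Done calls, so the workers can share it. If you also want Run to wait for the workers to finish before returning, a small variation could look like the sketch below; making the worker count a parameter is my addition, not part of the original answer.
// Variation on Run with a configurable number of workers.
// Assumes the Controller type above and that "sync" is imported.
func (c *Controller) Run(workers int, stopCh chan struct{}) {
    defer runtime.HandleCrash()
    defer c.queue.ShutDown()

    go c.informer.Run(stopCh)
    if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
        runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
        return
    }

    var wg sync.WaitGroup
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // wait.Until re-runs runWorker every second until stopCh is closed
            wait.Until(c.runWorker, time.Second, stopCh)
        }()
    }

    <-stopCh
    wg.Wait() // wait for in-flight items to finish after stopCh closes
}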
I am very new to Go and am attempting to make some adjustments to an open source library that consumes messages from Kafka using the Sarama library. The original code can be found here.
The original package implements a PartitionConsumer, which works just fine if one doesn't need read consistency across multiple consumers consuming the same topic; however, that does not work for me.
I have done some work within the same application to implement the Sarama NewConsumerGroup consumer, using some examples I found online.
Below is the code I currently have running:
package main
import (
"context"
// "flag"
"os"
"os/signal"
"sync"
"syscall"
"encoding/json"
"log"
"strings"
"github.com/Shopify/sarama"
// "github.com/Shopify/sarama/mocks"
)
// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
config *KafkaConfig
// consumers []sarama.PartitionConsumer
messages chan *sarama.ConsumerMessage
}
var (
brokers = ""
version = ""
group = ""
topics = ""
assignor = ""
oldest = true
verbose = false
)
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
ready chan bool
}
// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
/**
* Construct a new Sarama configuration.
* The Kafka cluster version has to be defined before the consumer/producer is initialized.
*/
c := sarama.NewConfig()
// Configuration options go here
log.Println("Starting a new Sarama consumer")
if verbose {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
version, err := sarama.ParseKafkaVersion("2.1.1")
if err != nil {
log.Panicf("Error parsing Kafka version: %v", err)
}
c.Version = version
if oldest {
c.Consumer.Offsets.Initial = sarama.OffsetOldest
}
/**
* Setup a new Sarama consumer group
*/
consumer := Consumer{ready: make(chan bool)}
ctx, cancel := context.WithCancel(context.Background())
client, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
if err != nil {
log.Panicf("Error creating consumer group client: %v", err)
}
wg := &sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
for {
if err := client.Consume(ctx, []string{config.topic}, &consumer); err != nil {
log.Panicf("Error from consumer: %v", err)
}
// check if context was cancelled, signaling that the consumer should stop
if ctx.Err() != nil {
return
}
consumer.ready = make(chan bool)
}
}()
<-consumer.ready // Await till the consumer has been set up
log.Println("Sarama consumer up and running!...")
sigterm := make(chan os.Signal, 1)
signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
select {
case <-ctx.Done():
log.Println("terminating: context cancelled")
case <-sigterm:
log.Println("terminating: via signal")
}
cancel()
wg.Wait()
if err = client.Close(); err != nil {
log.Panicf("Error closing client: %v", err)
}
i := &KafkaInput{
config: config,
// consumers: make([]sarama.PartitionConsumer, len(partitions)),
// messages: make(chan *sarama.ConsumerMessage, 256),
messages: make(chan *sarama.ConsumerMessage, 256),
}
return i
}
// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
// NOTE:
// Do not move the code below to a goroutine.
// The `ConsumeClaim` itself is called within a goroutine, see:
// https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
for message := range claim.Messages() {
log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
session.MarkMessage(message, "")
}
return nil
}
// ErrorHandler should receive errors
func (i *KafkaInput) ErrorHandler(consumer sarama.PartitionConsumer) {
for err := range consumer.Errors() {
log.Println("Failed to read access log entry:", err)
}
}
// Read Comment
func (i *KafkaInput) Read(data []byte) (int, error) {
message := <-i.messages
if !i.config.useJSON {
copy(data, message.Value)
return len(message.Value), nil
}
var kafkaMessage KafkaMessage
json.Unmarshal(message.Value, &kafkaMessage)
buf, err := kafkaMessage.Dump()
if err != nil {
log.Println("Failed to decode access log entry:", err)
return 0, err
}
copy(data, buf)
return len(buf), nil
}
func (i *KafkaInput) String() string {
return "Kafka Input: " + i.config.host + "/" + i.config.topic
}
// Setup is run at the beginning of a new session, before ConsumeClaim
func (consumer *Consumer) Setup(sarama.ConsumerGroupSession) error {
// Mark the consumer as ready
close(consumer.ready)
return nil
}
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error {
return nil
}
The KafkaConfig carries the groupID and topic for the consumer. When I run this program, the consumer fires up, reads from the proper topic using the correct group, and prints the messages to STDOUT using the ConsumeClaim method defined in this function:
func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
for message := range claim.Messages() {
log.Printf("Message claimed: value = %s, timestamp = %v, topic = %s", string(message.Value), message.Timestamp, message.Topic)
session.MarkMessage(message, "")
}
return nil
}
What I believe I need, however, is for the NewKafkaInput function to return a *KafkaInput with the messages from the claim added to the struct (forgive me if I am using the wrong terminology here; this is my first Go rodeo).
...
i := &KafkaInput{
config: config,
// consumers: make([]sarama.PartitionConsumer, len(partitions)),
// messages: make(chan *sarama.ConsumerMessage, 256),
messages: make(chan *sarama.ConsumerMessage, 256),
}
return i
}
In the original example that is done here:
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
...
go func(consumer sarama.PartitionConsumer) {
defer consumer.Close()
for message := range consumer.Messages() {
i.messages <- message
}
}(consumer)
...
}
I have spent days toying around with moving functions in and out of the NewKafkaInput function, attempting to add messages to the KafkaInput struct outside the function, and everything in between. I just can't get it to work. The NewKafkaInput function needs to return the *KafkaInput with any messages so that this function can complete:
func (i *KafkaInput) Read(data []byte) (int, error) {
message := <-i.messages
if !i.config.useJSON {
copy(data, message.Value)
return len(message.Value), nil
}
var kafkaMessage KafkaMessage
json.Unmarshal(message.Value, &kafkaMessage)
buf, err := kafkaMessage.Dump()
if err != nil {
log.Println("Failed to decode access log entry:", err)
return 0, err
}
copy(data, buf)
return len(buf), nil
}
It's entirely possible that I have made a complete mess of this thing as well, but any help and input is appreciated.
Thanks
Here is the solution to my problem. I had goroutines blocking the main function(s) and they needed to be broken out. If the code below doesn't make any sense, here is a link to the program I was modifying: https://github.com/buger/goreplay. If I can get a response from the owner I plan on cleaning up the code and submitting a pull request, or possibly publishing a fork.
package main
import (
"context"
"encoding/json"
"strings"
"os"
"log"
"github.com/Shopify/sarama"
)
// KafkaInput is used for receiving Kafka messages and
// transforming them into HTTP payloads.
type KafkaInput struct {
sarama.ConsumerGroup
config *KafkaConfig
consumer Consumer
messages chan *sarama.ConsumerMessage
}
// Consumer represents a Sarama consumer group consumer
type Consumer struct {
ready chan bool
messages chan *sarama.ConsumerMessage
}
var (
brokers = ""
version = ""
group = ""
topics = ""
assignor = ""
oldest = true
verbose = false
)
// NewKafkaInput creates instance of kafka consumer client.
func NewKafkaInput(address string, config *KafkaConfig) *KafkaInput {
/**
* Construct a new Sarama configuration.
* The Kafka cluster version has to be defined before the consumer/producer is initialized.
*/
c := sarama.NewConfig()
// Configuration options go here
log.Printf("KafkaConfig: %s", config.host)
log.Printf("KafkaConfig: %s", config.group)
log.Printf("KafkaConfig: %s", config.topic)
log.Println("Starting a new Sarama consumer")
if verbose {
sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
}
version, err := sarama.ParseKafkaVersion("2.1.1")
if err != nil {
log.Panicf("Error parsing Kafka version: %v", err)
}
c.Version = version
if oldest {
c.Consumer.Offsets.Initial = sarama.OffsetOldest
}
group, err := sarama.NewConsumerGroup(strings.Split(config.host, ","), config.group, c)
if err != nil {
log.Panicf("Error creating consumer group client: %v", err)
}
/**
* Setup a new Sarama consumer group
*/
consumer := Consumer{
ready: make(chan bool),
messages: make(chan *sarama.ConsumerMessage, 256),
}
i := &KafkaInput{
ConsumerGroup: group,
config: config,
messages: make(chan *sarama.ConsumerMessage, 256),
consumer: consumer,
}
go i.loop([]string{config.topic})
i.messages = consumer.messages
return i
}
//ConsumeClaim and stuff
func (i *KafkaInput) ConsumeClaim(s sarama.ConsumerGroupSession, c sarama.ConsumerGroupClaim) error {
for msg := range c.Messages() {
s.MarkMessage(msg, "")
i.Push(msg)
}
return nil
}
func (i *KafkaInput) loop(topic []string) {
ctx := context.Background()
for {
if err := i.Consume(ctx, []string{i.config.topic}, i); err != nil {
return
}
}
}
// Push Messages
func (i *KafkaInput) Push(m *sarama.ConsumerMessage) {
if i.consumer.messages != nil {
log.Printf("MSGPUSH: %s", m)
i.consumer.messages <- m
}
}
func (i *KafkaInput) Read(data []byte) (int, error) {
message := <-i.messages
log.Printf("Msg: %s", string(message.Value))
if !i.config.useJSON {
copy(data, message.Value)
return len(message.Value), nil
}
var kafkaMessage KafkaMessage
json.Unmarshal(message.Value, &kafkaMessage)
buf, err := kafkaMessage.Dump()
if err != nil {
log.Println("Failed to decode access log entry:", err)
return 0, err
}
copy(data, buf)
return len(buf), nil
}
func (i *KafkaInput) String() string {
return "Kafka Input: " + i.config.host + "/" + i.config.topic
}
// Setup is run at the beginning of a new session, before ConsumeClaim
func (i *KafkaInput) Setup(s sarama.ConsumerGroupSession) error {
return nil
}
// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (i *KafkaInput) Cleanup(s sarama.ConsumerGroupSession) error {
return nil
}
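One thing I might add on top of this (an assumption on my part, not something the original fix does): the loop method swallows the Consume error and has no way to stop, so a cancellable context could be threaded through. A minimal sketch, assuming the KafkaInput type above; loopWithContext is a hypothetical name:
// Sketch: same loop, but stoppable via context and logging the error.
func (i *KafkaInput) loopWithContext(ctx context.Context, topics []string) {
    for {
        // Consume blocks for the lifetime of one group session and returns
        // when a rebalance happens or the context is cancelled.
        if err := i.Consume(ctx, topics, i); err != nil {
            log.Printf("consume error: %v", err)
            return
        }
        if ctx.Err() != nil {
            return // context cancelled: stop re-joining the group
        }
    }
}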
I'm trying to create a Google Pub/Sub subscriber in Go where I take 100 messages at a time and then write them to InfluxDB. I'm trying to use channels to do this, like this:
package main
import (
"os"
"fmt"
"cloud.google.com/go/pubsub"
"log"
"sync"
"golang.org/x/net/context"
"encoding/json"
clnt "github.com/influxdata/influxdb/client/v2"
"time"
)
type SensorData struct {
Pressure float64 `json:"pressure"`
Temperature float64 `json:"temperature"`
Dewpoint float64 `json:"dewpoint"`
Timecollected int64 `json:"timecollected"`
Latitude float64 `json:"latitude"`
Longitude float64 `json:"longitude"`
Humidity float64 `json:"humidity"`
SensorID string `json:"sensorId"`
Zipcode int `json:"zipcode"`
Warehouse string `json:"warehouse"`
Area string `json:"area"`
}
type SensorPoints struct {
SensorData []SensorData
}
func main () {
messages := make(chan SensorData, 100)
// Create a new Influx HTTPClient
c, err := clnt.NewHTTPClient(clnt.HTTPConfig{
Addr: "http://localhost:8086",
Username: "user",
Password: "pass",
})
if err != nil {
log.Fatal(err)
}
// Create pubsub subscriber
ctx := context.Background()
proj := os.Getenv("GOOGLE_CLOUD_PROJECT")
if proj == "" {
fmt.Fprintf(os.Stderr, "GOOGLE_CLOUD_PROJECT environment variable must be set.\n")
os.Exit(1)
}
client, err := pubsub.NewClient(ctx, proj)
if err != nil {
log.Fatalf("Could not create pubsub Client: %v", err)
}
const sub = "influxwriter"
//create influx a blank batchpoint set
bp, err := clnt.NewBatchPoints(clnt.BatchPointsConfig{
Database: "sensordata",
Precision: "s",
})
if err != nil {
log.Fatal(err)
}
// Pull messages via the subscription.
go pullMsgs(client, sub, messages)
if err != nil {
log.Fatal(err)
}
writeInflux(messages, bp)
c.Close()
}
func pullMsgs(client *pubsub.Client, name string, messages chan<- SensorData) {
ctx := context.Background()
// [START pubsub_subscriber_async_pull]
// [START pubsub_quickstart_subscriber]
// Consume 10 messages.
var mu sync.Mutex
var sensorinfos SensorPoints
sensorinfo := &SensorData{}
received := 0
sub := client.Subscription(name)
cctx, _ := context.WithCancel(ctx)
err := sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
msg.Ack()
json.Unmarshal(msg.Data, sensorinfo)
//fmt.Println(string(msg.Data))
//fmt.Println(sensorinfo.SensorID)
sensorinfos.SensorData = append(sensorinfos.SensorData, *sensorinfo)
mu.Lock()
defer mu.Unlock()
received++
fmt.Println("rcv: ", received)
messages <- *sensorinfo
})
if err != nil {
fmt.Println(err)
}
// [END pubsub_subscriber_async_pull]
// [END pubsub_quickstart_subscriber]
}
func writeInflux(sensorpoints <- chan SensorData, bp clnt.BatchPoints) {
for p := range sensorpoints {
// Create a point and add to batch
tags := map[string]string{
"sensorId": p.SensorID,
"warehouse": p.Warehouse,
"area": p.Area,
"zipcode": string(p.Zipcode),
}
fields := map[string]interface{}{
"pressure": p.Pressure,
"humidity": p.Humidity,
"temperature": p.Temperature,
"dewpoint": p.Dewpoint,
"longitude": p.Longitude,
"latitude": p.Latitude,
}
pt, err := clnt.NewPoint("sensordata", tags, fields, time.Unix(p.Timecollected, 0))
if err != nil {
log.Fatal(err)
}
bp.AddPoint(pt)
}
}
but it doesn't seem to ever get past the initial pullMsgs function and just keeps printing output from there:
rcv: 1
rcv: 2
rcv: 3
rcv: 4
rcv: 5
rcv: 6
rcv: 7
I thought that once the channel gets full, it would block until the channel is emptied out.
This is the pubsub pull code I'm using as a reference.
When you've sent the desired number of messages on the channel, close the channel and cancel the context. Try using the technique demonstrated in the documentation of canceling after some number of messages. Since your buffer is 100 and you're trying to consume 100 messages at a time, that's the number. If you want your program to exit, close the channel so that the for p := range sensorpoints loop in writeInflux hits a stopping point and doesn't block waiting for more elements to be added to the channel.
Note this in the Go pubsub API doc:
To terminate a call to Receive, cancel its context.
That is not what's stalling your main goroutine, but your pullMsgs goroutine will not exit on its own without that cancel.
Also, check for errors on Unmarshal. If you don't want to handle unmarshal errors at this point in the code, consider changing the channel type and sending msg or msg.Data instead and unmarshaling upon channel receipt.
cctx, cancel := context.WithCancel(ctx)
err := sub.Receive(cctx, func(ctx context.Context, msg *pubsub.Message) {
msg.Ack()
err := json.Unmarshal(msg.Data, sensorinfo)
if err != nil {
fmt.Printf("Failed to unmarshal: %s\n", err)
}
mu.Lock()
defer mu.Unlock()
received++
fmt.Println("rcv: ", received)
messages <- *sensorinfo
if received == 100 {
close(messages) // no more messages will be sent on channel
cancel()
}
})
if err != nil {
fmt.Println(err)
}
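As a follow-up (my addition, not part of the answer above): once the range loop in writeInflux ends, nothing in the original main actually submits the batch, so a Write call is still needed. A rough sketch, reusing the SensorData type and the clnt import from the question; passing the client into writeInflux and returning an error are my changes:
// Sketch: drain the channel into the batch, then submit it once the channel is closed.
func writeInflux(sensorpoints <-chan SensorData, c clnt.Client, bp clnt.BatchPoints) error {
    for p := range sensorpoints {
        tags := map[string]string{
            "sensorId":  p.SensorID,
            "warehouse": p.Warehouse,
            "area":      p.Area,
            "zipcode":   fmt.Sprint(p.Zipcode), // string(int) would give a rune, not digits
        }
        fields := map[string]interface{}{
            "pressure":    p.Pressure,
            "humidity":    p.Humidity,
            "temperature": p.Temperature,
            "dewpoint":    p.Dewpoint,
            "longitude":   p.Longitude,
            "latitude":    p.Latitude,
        }
        pt, err := clnt.NewPoint("sensordata", tags, fields, time.Unix(p.Timecollected, 0))
        if err != nil {
            return err
        }
        bp.AddPoint(pt)
    }
    // The loop exits when pullMsgs closes the channel after 100 messages.
    return c.Write(bp)
}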
I'm using the Go RabbitMQ library in a project, and I have a Connect function in a separate package. I call Connect in my main function; however, because I connect to RabbitMQ in a separate function, the deferred conn.Close() runs when Connect returns, which closes the connection inside the Connect function. That makes perfect sense, but it begs the question: where, then, do I call conn.Close()?
package drivers
import (
// Core
"log"
"os"
"time"
// Third party
"github.com/streadway/amqp"
)
type Queue struct {
Channel *amqp.Channel
}
func NewQueue() *Queue {
return &Queue{}
}
// Queue interface
type IQueue interface {
Connect(args ...interface{})
Publish(queue string, payload []byte) error
Listen(queue string) (<-chan amqp.Delivery, error)
Declare(queue string) (amqp.Queue, error)
}
// Connect - Connects to RabbitMQ
func (queue *Queue) Connect(args ...interface{}) {
var uri string
if args == nil {
// Get from env vars
uri = os.Getenv("RABBIT_MQ_URI")
if uri == "" {
log.Panic("No uri for queue given")
}
} else {
uri = args[0].(string)
}
// Make at most 5 connection attempts, waiting 5 seconds between attempts
for i := 0; i < 5; i++ {
log.Println("Connecting to:", uri)
// If connection is successful, return new instance
conn, err := amqp.Dial(uri)
defer conn.Close()
if err == nil {
log.Println("Successfully connected to queue!")
channel, _ := conn.Channel()
queue.Channel = channel
return
}
log.Println("Failed to connect to queue, retrying...", err)
// Wait 5 seconds before retrying
time.Sleep(5 * time.Second)
}
}
// Declare a new queue
func (queue *Queue) Declare(queueName string) (amqp.Queue, error) {
return queue.Channel.QueueDeclare(
queueName,
true,
false,
false,
false,
nil,
)
}
// Publish a message
func (queue *Queue) Publish(queueName string, payload []byte) error {
return queue.Channel.Publish(
"",
queueName,
false,
false,
amqp.Publishing{
DeliveryMode: amqp.Persistent,
ContentType: "application/json",
Body: payload,
},
)
}
// Listen for a new message
func (queue *Queue) Listen(queueName string) (<-chan amqp.Delivery, error) {
return queue.Channel.Consume(
queueName,
"",
true,
false,
false,
false,
nil,
)
}
As you can see in the code above, I'm calling defer conn.Close() after making a connection, however, this immediately closes the connection again.
Here's a Go Playground spoofing what I'm talking about... https://play.golang.org/p/5cz2D4gDgn
The simple solution is to call conn.Close() from elsewhere. This might just be me, but I think it's kinda odd that you wouldn't expose the connection elsewhere, i.e. as a field in Queue. Exposing the ability to close the connection from the Queue would solve this and give you more flexibility.
So this:
type Queue struct {
// your original fields
Conn *amqp.Connection
}
// Somewhere else
queue.Conn.Close()
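Fleshing that first option out a little (just a sketch; the field layout and the Close method are mine, assuming the streadway/amqp types from the question):
// Keep the connection on the Queue so callers can close it.
type Queue struct {
    Conn    *amqp.Connection
    Channel *amqp.Channel
}

// Close releases the channel and the underlying connection.
func (q *Queue) Close() error {
    if q.Channel != nil {
        q.Channel.Close()
    }
    if q.Conn != nil {
        return q.Conn.Close()
    }
    return nil
}

// In the caller:
//   q := drivers.NewQueue()
//   q.Connect()
//   defer q.Close()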
Your other option is connecting, then doing all the actions you want with that connection, then closing. I'm thinking something like:
func action(conn *amqp.Connection, args ...interface{}) <-chan bool {
done := make(chan bool)
go func(amqpConn *amqp.Connection, dChan chan bool) {
// Do what you want with the connection
dChan <- true
}(conn, done)
return done
}
func (queue *Queue) Connect(args ...interface{}) {
// your connection code
doneChans := make([]<-chan bool, 0, 5)
for i := 0; i < 5; i++ {
conn, err := amqp.Dial(uri)
if err != nil {
// handle error
continue
}
defer conn.Close()
doneChans = append(doneChans, action(conn))
}
// This loop will block until all the action calls are done
for j := range doneChans {
isFinish := <-doneChans[j]
if !isFinish {
// handle bad state
}
}
}
One option is to have Connect return conn, and call defer conn.Close() in the caller.
package drivers
// imports, etc
func (queue *Queue) Connect(args ...interface{}) (*amqp.Connection, error) {
// ...
conn, err := amqp.Dial(uri)
if err != nil {
return nil, err
}
// ...
return conn, nil
}
Then in another package:
package stuff
// imports, etc
func doStuff() {
queue := drivers.NewQueue()
conn, err := queue.Connect(args...)
if err != nil {
log.Fatalf("oh no! %v!", err)
}
defer conn.Close()
// Do stuff
}