RabbitMQ consumer connected, but after a while cannot receive messages from queue - Go

I have code that successfully connects to RabbitMQ and consumes messages from it.
But after a while the consumers stop receiving messages, even though the connection is still open when this happens.
package rabbitmq
import (
"context"
"fmt"
"os"
"runtime"
"time"
"github.com/getsentry/sentry-go"
log "github.com/sirupsen/logrus"
"github.com/streadway/amqp"
)
type RabbitMQ struct {
conn *amqp.Connection
queues map[string]amqp.Queue
connString string
rabbitCloseError chan *amqp.Error
recoveryConsumer []RecoveryConsumer
// ch *amqp.Channel
// exchange_name string
}
type RecoveryConsumer struct {
queueName string
routingKey string
handler func(d amqp.Delivery)
concurrency int8
}
type (
Delivery = amqp.Delivery
)
func (r *RabbitMQ) IfExist(queueName string) bool {
for _, item := range r.recoveryConsumer {
if item.queueName == queueName {
return false
}
}
return true
}
func (r *RabbitMQ) RecoverConsumers() {
for _, i := range r.recoveryConsumer {
go r.StartConsumer(i.queueName, i.routingKey, i.handler, int(i.concurrency))
log.Infof("Consumer for %v successfully recovered", i.queueName)
}
}
func (r *RabbitMQ) Reconnector() {
for { //nolint
select {
case err := <-r.rabbitCloseError:
log.Errorf("[RabbitMQ] Connection Closed : {'Reason': '%v', 'Code': '%v', 'Recoverable': '%v', 'Server_Side': '%v'", err.Reason, err.Code, err.Recover, err.Server)
log.Debug("Reconnecting after connection closed")
sentry.CaptureException(fmt.Errorf("[RabbitMQ] Connection Closed : {'Reason': '%v', 'Code': '%v', 'Recoverable': '%v', 'Server_Side': '%v'", err.Reason, err.Code, err.Recover, err.Server))
r.connection()
r.RecoverConsumers()
}
}
}
func (r *RabbitMQ) Connect(host string, user string, pass string, virthost string) {
r.connString = "amqp://" + user + ":" + pass + "#" + host + "/"
if virthost != "/" || len(virthost) > 0 {
r.connString += virthost
}
r.connection()
go r.Reconnector()
}
func (r *RabbitMQ) connection() {
if r.conn != nil {
if !r.conn.IsClosed() {
return
} else {
log.Info("Reconnecting to RabbitMQ...")
}
}
var err error
r.conn, err = amqp.Dial(r.connString)
if err != nil {
sentry.CaptureException(err)
log.Fatalf("%s: %s", "Failed to connect to RabbitMQ", err)
}
r.conn.Config.Heartbeat = 5 * time.Second
r.queues = make(map[string]amqp.Queue)
r.rabbitCloseError = make(chan *amqp.Error)
r.conn.NotifyClose(r.rabbitCloseError)
log.Debug("[RabbitMQ] Successfully connected to RabbitMQ")
log.Infof("Number of Active Thread/Goroutine %v", runtime.NumGoroutine())
}
func (r *RabbitMQ) CreateChannel() *amqp.Channel {
ch, err := r.conn.Channel()
if err != nil {
log.Error(err)
return nil
}
return ch
}
func (r *RabbitMQ) QueueAttach(ch *amqp.Channel, name string) {
q, err := ch.QueueDeclare(
name, // name
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
log.Fatalf("%s: %s", "Failed to declare a queue", err)
}
r.queues[name] = q
// r.ch.ExchangeDeclare()
}
func (r *RabbitMQ) TempQueueAttach(ch *amqp.Channel, name string) {
_, err := ch.QueueDeclare(
name, // name
true, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
ch.Close()
sentry.CaptureException(fmt.Errorf("%s: %s", "Failed to declare a temporary queue", err))
log.Fatalf("%s: %s", "Failed to declare a temporary queue", err)
}
}
func (r *RabbitMQ) Publish(ch *amqp.Channel, queue string, body []byte) {
span := sentry.StartSpan(context.TODO(), "publish message")
defer span.Finish()
err := ch.Publish(
"", // exchange
r.queues[queue].Name, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
Headers: map[string]interface{}{},
ContentType: "application/json",
ContentEncoding: "",
DeliveryMode: amqp.Persistent,
Priority: 0,
CorrelationId: "",
ReplyTo: "",
Expiration: "",
MessageId: "",
Timestamp: time.Now().UTC(),
Type: "",
UserId: "",
AppId: "",
Body: body,
})
if err != nil {
sentry.CaptureException(err)
log.Fatalf("%s: %s", "Failed to publish a message", err)
}
log.Debugf("Send message: %s", string(body))
}
func (r *RabbitMQ) StartConsumer(queueName string, routingKey string, handler func(d amqp.Delivery), concurrency int) {
// prefetch as many messages as there are concurrent handlers
ok := r.IfExist(queueName)
if ok {
r.recoveryConsumer = append(r.recoveryConsumer, RecoveryConsumer{
queueName: queueName,
routingKey: routingKey,
handler: handler,
concurrency: int8(concurrency),
})
}
ch, err := r.conn.Channel()
if err != nil {
log.Error(err)
}
prefetchCount := concurrency * 1
err = ch.Qos(prefetchCount, 0, false)
if err != nil {
sentry.CaptureException(err)
log.Errorf("%s: %s", "Failed QOS", err)
}
r.QueueAttach(ch, queueName)
msgs, err := ch.Consume(
queueName, // queue
"", // consumer
true, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
if err != nil {
sentry.CaptureException(err)
log.Fatalf("%s: %s", "Failed to consume messages", err)
}
go func() {
for msg := range msgs {
handler(msg)
}
}()
}
func (r *RabbitMQ) WaitMessage(ch *amqp.Channel, queueName string, timeout time.Duration) []byte {
st := time.Now()
for time.Since(st) < timeout {
msg, ok, err := ch.Get(queueName, true)
if err != nil {
log.Errorf("Can't consume queue. Error: %s", err.Error())
sentry.CaptureException(err)
return nil
}
if ok {
return msg.Body
}
time.Sleep(50 * time.Millisecond)
}
return nil
}
What can be the reason for this?
I know it should be on the RabbitMQ side, but the client library does not show any error.
When it first starts, the consumer listens and works successfully; the problem only appears after a while.

The only thing I can advise you to try is to use heartbeats. They will detect whether a connection is dead due to, for example, a network failure.
You can take a look at them here: https://www.rabbitmq.com/heartbeats.html.
I'm not sure about this one, as it has been a long time since I used it, but if you put error handling around the message-receiving part, the problem might show up there when the connection dies.
I hope this helps you a little in solving your problem.
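A minimal sketch of enabling heartbeats with streadway/amqp (the URL and interval are placeholders, not taken from the question): the heartbeat has to be passed to amqp.DialConfig, because it is negotiated while the connection is opened; assigning conn.Config.Heartbeat after Dial, as the question's code does, comes too late to change the negotiated value.
package main

import (
	"log"
	"time"

	"github.com/streadway/amqp"
)

func main() {
	// Heartbeat is negotiated while the connection is being opened,
	// so it has to be part of the dial configuration.
	conn, err := amqp.DialConfig("amqp://guest:guest@localhost:5672/", amqp.Config{
		Heartbeat: 10 * time.Second,
	})
	if err != nil {
		log.Fatalf("dial: %s", err)
	}
	defer conn.Close()

	// With heartbeats on, a dead TCP connection surfaces here instead of
	// the consumer silently going quiet; a reconnect loop can wait on this.
	closed := conn.NotifyClose(make(chan *amqp.Error, 1))
	if err := <-closed; err != nil {
		log.Printf("connection closed: %v", err)
	}
}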

Related

Consuming from multiple queues

I have a RabbitMQ instance that I use to listen for changes. However, I have different messages that I need to consume, and I am not sure how to go about it. Would I need to keep duplicating my Consume method, and what is the danger of doing that, if any?
type Consumer struct {
conn *amqp.Connection
db *mongo.Database
queueName string
}
func (consumer *Consumer) setup() error {
_, err := consumer.conn.Channel()
if err != nil {
return err
}
return nil
}
// NewConsumer returns a new Consumer
func NewConsumer(conn *amqp.Connection, db *mongo.Database) (Consumer, error) {
consumer := Consumer{
conn: conn,
db: db,
}
return consumer, nil
}
// Listen will listen for all new Queue publications
// and print them to the console.
func (consumer *Consumer) Listen(topics []string) error {
ch, err := consumer.conn.Channel()
if err != nil {
return err
}
defer ch.Close()
msgs, err := ch.Consume("charge.get", "", true, false, false, false, nil)
if err != nil {
return err
}
forever := make(chan bool)
go func() {
for msg := range msgs {
switch msg.RoutingKey {
case "charge.get":
worker.Refund(paymentRepo.NewPaymentRepository(consumer.db), msg.Body)
}
// acknowledge received event
log.Printf("Received a message: %s ROUTING KEY %s", msg.Body, msg.RoutingKey)
}
}()
log.Printf("[*] Waiting for message [Exchange, Queue][%s, %s]. To exit press CTRL+C", " getExchangeName()", "q.Name")
<-forever
return nil
}
Do I just add another msgs, err := ch.Consume("charge.update", "", true, false, false, false, nil) below the first one to consume from that queue, or how should I go about this?
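As a sketch of one common approach (the helper and handler names below are illustrative, not from the question's code), each queue can get its own Consume call and delivery loop on the shared channel:
package main

import "github.com/streadway/amqp"

// consumeQueue is a hypothetical helper: it starts one delivery loop for a
// single queue on the given channel and hands every message to a handler.
func consumeQueue(ch *amqp.Channel, queue string, handle func(amqp.Delivery)) error {
	msgs, err := ch.Consume(queue, "", true, false, false, false, nil)
	if err != nil {
		return err
	}
	go func() {
		for msg := range msgs {
			handle(msg)
		}
	}()
	return nil
}
Calling such a helper once for "charge.get" and once for "charge.update" on the same channel avoids duplicating the whole Listen method. The main things to watch when copy-pasting Consume blocks are keeping the ack and QoS settings consistent, and remembering that the consumers share one channel, so a channel-level error cancels all of them at once.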

Getting more than 1 push notification using Go cron

I am creating Go REST APIs. We are using an AWS server.
I want to send push notifications to mobile, so I used
https://pkg.go.dev/github.com/robfig/cron (https://github.com/robfig/cron)
to create a cron job.
We use two versions of the API, V1 (old) and V1.1 (new),
and we have more than one environment: dev, QA, preproduction, and production.
In our Go code I created a cron job for sending the push notification to mobile, and the function is called inside main().
But we are getting two notifications each interval, and I don't understand why two arrive at a time.
I am attaching my code.
const title = "This Week’s Activity"
func NotifyWeeklyActivity(db *sql.DB, logger *zap.Logger) {
logger.Info("NotifyWeeklyActivity- start")
c := cron.New()
c.AddFunc("*/5 * * * *", func() {
lastweekTime := CurrentUTC().AddDate(0, 0, -7)
type PostCount struct {
HiveID uint64 `json:"hive_id"`
Post uint64 `json:"post"`
NotificationTopicArn null.String `json:"notification_topic_arn"`
}
var posts []PostCount
err := queries.Raw(`
select count(post_id) as post , post.hive_id as hive_id , hive.notification_topic_arn
from post
join hive on post.hive_id=hive.hive_id and hive.deleted_at is null
where post.deleted_at is null
and hive.deleted_at is null
and post.created_at between ? and ?
group by hive_id
having count(post_id)>3 ;
`, lastweekTime, CurrentUTC()).Bind(context.TODO(), db, &posts)
if err != nil {
logger.Error("error while fetching data ", zap.Error(err))
// return err
}
cfg, _ := config.GetImpart()
if cfg.Env != config.Local {
notification := NewImpartNotificationService(db, string(cfg.Env), cfg.Region, cfg.IOSNotificationARN, logger)
logger.Info("Notification- fetching complted")
for _, hive := range posts {
pushNotification := Alert{
Title: aws.String(title),
Body: aws.String(
fmt.Sprintf("Check out %d new posts in your Hive this week", hive.Post),
),
}
additionalData := NotificationData{
EventDatetime: CurrentUTC(),
HiveID: hive.HiveID,
}
Logger.Info("Notification",
zap.Any("pushNotification", pushNotification),
zap.Any("additionalData", additionalData),
zap.Any("hive", hive),
)
err = notification.NotifyTopic(context.Background(), additionalData, pushNotification, hive.NotificationTopicArn.String)
if err != nil {
logger.Error("error sending notification to topic", zap.Error(err))
}
}
}
})
c.Start()
}
func NewImpartNotificationService(db *sql.DB, stage, region, platformApplicationARN string, logger *zap.Logger) NotificationService {
//SNS not available in us-east-2
if strings.EqualFold(region, "us-east-2") {
region = "us-east-1"
}
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
HTTPClient: NewHttpClient(10 * time.Second),
})
if err != nil {
logger.Fatal("unable to create aws session", zap.Error(err))
}
snsAppleNotificationService := &snsAppleNotificationService{
stage: stage,
Logger: logger,
SNS: sns.New(sess),
platformApplicationARN: platformApplicationARN,
db: db,
}
logger.Debug("created new NotificationService",
zap.String("stage", stage),
zap.String("arn", platformApplicationARN))
return snsAppleNotificationService
}
Why am I getting two notifications at a time?
How can I solve this?
func (ns *snsAppleNotificationService) NotifyTopic(ctx context.Context, data NotificationData, alert Alert, topicARN string) error {
var b []byte
var err error
if strings.TrimSpace(topicARN) == "" {
return nil
}
ns.Logger.Debug("sending push notification",
zap.Any("data", data),
zap.Any("msg", alert),
zap.String("platformEndpoint", topicARN),
zap.String("arn", ns.platformApplicationARN))
if b, err = json.Marshal(apnsMessageWrapper{
APNSData: APNSMessage{
Alert: alert,
Sound: aws.String("default"),
Data: data,
Badge: aws.Int(0),
},
}); err != nil {
return err
}
msg := awsSNSMessage{Default: *alert.Body}
msg.APNS = string(b)
msg.APNSSandbox = string(b)
if b, err = json.Marshal(msg); err != nil {
return err
}
input := &sns.PublishInput{
Message: aws.String(string(b)),
MessageStructure: aws.String("json"),
TopicArn: aws.String(topicARN),
}
// print()
_, err = ns.Publish(input)
if err != nil {
ns.Logger.Error("push-notification : After publish input",
zap.Any("topicARN", topicARN),
zap.Error(err),
)
}
return err
}
main function:
func main() {
logger, err := zap.NewProduction()
if err != nil {
log.Fatal(err)
}
cfg, err := config.GetImpart()
if err != nil {
logger.Fatal("error parsing config", zap.Error(err))
}
if cfg == nil {
logger.Fatal("nil config")
return
}
if cfg.Debug {
gin.SetMode(gin.DebugMode)
//boil.DebugMode = true
boil.WithDebugWriter(context.TODO(), &config.ZapBoilWriter{Logger: logger})
logger, _ = zap.NewDevelopment()
if cfg.Env == config.Local || cfg.Env == config.Development {
logger.Debug("config startup", zap.Any("config", *cfg))
}
} else {
gin.SetMode(gin.ReleaseMode)
}
//init the sentry logger ,either debug
logger, err = impart.InitSentryLogger(cfg, logger, cfg.Debug)
if err != nil {
logger.Error("error on sentry init", zap.Any("error", err))
}
migrationDB, err := cfg.GetMigrationDBConnection()
if err != nil {
logger.Fatal("unable to connect to DB", zap.Error(err))
}
//Trap sigterm during migraitons
migrationsDoneChan := make(chan bool)
shutdownMigrationsChan := make(chan bool)
sigc := make(chan os.Signal, 1)
signal.Notify(sigc,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
go func() {
select {
case <-sigc:
logger.Info("received a shutdown request during migrations, sending shutdown signal")
shutdownMigrationsChan <- true
case <-migrationsDoneChan:
logger.Info("migrations complete, no longer waiting for sig int")
return
}
}()
err = migrater.RunMigrationsUp(migrationDB, cfg.MigrationsPath, logger, shutdownMigrationsChan)
if err != nil {
logger.Fatal("error running migrations", zap.Error(err))
}
migrationsDoneChan <- true
if err := migrationDB.Close(); err != nil {
logger.Fatal("error closing migrations DB connection", zap.Error(err))
}
boil.SetLocation(time.UTC)
db, err := cfg.GetDBConnection()
if err != nil {
logger.Fatal("unable to connect to DB", zap.Error(err))
}
defer db.Close()
defer logger.Sync()
// if err := migrater.BootStrapAdminUsers(db, cfg.Env, logger); err != nil {
// logger.Fatal("unable to bootstrap user", zap.Error(err))
// }
// if err := migrater.BootStrapTopicHive(db, cfg.Env, logger); err != nil {
// logger.Fatal("unable to bootstrap user", zap.Error(err))
// }
// initiate global profanity detector
impart.InitProfanityDetector(db, logger)
impart.NotifyWeeklyActivity(db, logger)
services := setupServices(cfg, db, logger)
r := gin.New()
r.Use(CORS)
r.Use(secure.Secure(secure.Options{
//AllowedHosts: []string{"*"},
// AllowedHosts: []string{"localhost:3000", "ssl.example.com"},
//SSLRedirect: true,
// SSLHost: "*",
SSLProxyHeaders: map[string]string{"X-Forwarded-Proto": "https"},
STSIncludeSubdomains: true,
FrameDeny: true,
ContentTypeNosniff: true,
BrowserXssFilter: true,
ContentSecurityPolicy: "default-src 'self'",
}))
r.RedirectTrailingSlash = true
r.Use(ginzap.RecoveryWithZap(logger, true)) // panics don't stop server
r.Use(ginzap.Ginzap(logger, time.RFC3339, true)) // logs all requests
r.NoRoute(noRouteFunc)
r.GET("/ping", func(ctx *gin.Context) {
_, err := dbmodels.Pings(dbmodels.PingWhere.Ok.EQ(true)).One(ctx, db)
if err != nil {
ctx.AbortWithStatus(http.StatusInternalServerError)
}
ctx.String(http.StatusOK, "pong")
})
var v1Route string
var v2Route string
if cfg.Env == config.Production || cfg.Env == config.Local {
v1Route = "v1"
v2Route = "v1.1"
} else {
v1Route = fmt.Sprintf("%s/v1", cfg.Env)
v2Route = fmt.Sprintf("%s/v1.1", cfg.Env)
}
err = mailchimp.SetKey(impart.MailChimpApiKey)
if err != nil {
logger.Info("Error connecting Mailchimp", zap.Error(err),
zap.Any("MailchimpApikey", cfg.MailchimpApikey))
}
v1 := r.Group(v1Route)
setRouter(v1, services, logger, db)
v2 := r.Group(v2Route)
setRouter(v2, services, logger, db)
server := cfg.GetHttpServer()
server.Handler = r
logger.Info("Impart backend started.", zap.Int("port", cfg.Port), zap.String("env", string(cfg.Env)))
if err := graceful.Graceful(server.ListenAndServe, server.Shutdown); err != nil {
logger.Fatal("error serving", zap.Error(err))
}
logger.Info("done serving")
}
The Publish method from the AWS SNS SDK:
// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish
func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) {
req, out := c.PublishRequest(input)
return out, req.Send()
}
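For what it's worth, here is a minimal sketch of how robfig/cron scheduling behaves (the schedule and log message are illustrative, not from the question): each cron.New() scheduler fires its registered functions independently, so two schedulers created from the same code, or the same binary running twice against one topic ARN, will each send their own notification.
package main

import (
	"log"

	"github.com/robfig/cron"
)

func main() {
	// Two independent schedulers registering the same function: both fire
	// on every tick, so the "notification" below is logged twice per run.
	for i := 1; i <= 2; i++ {
		i := i
		c := cron.New()
		// "@every" is used here because it parses the same way across
		// versions of the cron library.
		c.AddFunc("@every 5s", func() {
			log.Printf("scheduler %d: would publish the weekly-activity notification", i)
		})
		c.Start()
	}
	select {} // keep the schedulers running
}
Whether that applies here depends on how many processes (environments or API versions) end up calling NotifyWeeklyActivity against the same topic.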

How to return a channel

I'm creating a worker to consume messages from a RabbitMQ queue. To achieve that, I created the following file, named queue.go:
package ExternalServices
import (
"../domain"
"encoding/json"
"github.com/streadway/amqp"
"os"
)
const (
catalogQueue = "catalog-queue"
)
func EnqueueMessageCatalog(catalog *domain.Catalog) error {
marshal, err := json.Marshal(*catalog)
if err != nil {
return err
}
jsonVal := string(marshal)
err = enqueue(catalogQueue, jsonVal)
return err
}
func DequeueMessageCatalog() ([]domain.Catalog, error) {
msgs, err := dequeue(catalogQueue)
if err != nil {
return nil, err
}
allCatalogs := make([]domain.Catalog, len(msgs))
for _, currMsg := range msgs {
var currCatalog domain.Catalog
err = json.Unmarshal([]byte(currMsg), &currCatalog)
if err != nil {
return nil, err
}
}
return allCatalogs, nil
}
func openConnection() (*amqp.Connection, *amqp.Channel, error) {
conn, err := amqp.Dial(os.Getenv("RabbitMQConStr"))
if err != nil {
return nil, nil, err
}
ch, err := conn.Channel()
if err != nil {
conn.Close()
return nil, nil, err
}
return conn, ch, nil
}
func ensureQueueExists(queueName string, ch *amqp.Channel) (amqp.Queue, error) {
q, err := ch.QueueDeclare(
queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
return q, err
}
func enqueue(queueName string, message string) error {
con, ch, err := openConnection()
if err != nil {
return err
}
defer con.Close()
defer ch.Close()
q, err := ensureQueueExists(queueName, ch)
if err != nil {
return err
}
err = ch.Publish(
"", // exchange
q.Name, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/json",
Body: []byte(message),
})
return err
}
func dequeue(queueName string) ([]string, error) {
con, ch, err := openConnection()
if err != nil {
return nil, err
}
defer con.Close()
defer ch.Close()
q, err := ensureQueueExists(queueName, ch)
if err != nil {
return nil, err
}
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
true, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
if err != nil {
return nil, err
}
jsons := make([]string, len(msgs))
i := 0
for currMsg:= range msgs {
jsons[i] = string(currMsg.Body)
i += 1
}
return jsons, nil
}
However, I got a bit confused with the dequeue function. I want my worker to be notified every time a message arrives in my queue, so I guess the proper way to do that is to create a string chan for my worker, since I don't want to expose the message channel returned by Consume to it.
This is my worker so far.
package worker
import (
"../external-services"
"log"
)
func StartWorker() {
go func() {
messages, err := ExternalServices.DequeueMessageCatalog();
if err != nil {
// todo log
}
for d := range messages {
log.Printf("Received a message: %s", d)
}
}()
}
How can I modify my dequeue function so it returns a string chan?
After modifying this method to return the string chan, do the lines defer con.Close() and defer ch.Close() need to be deleted from it?
It's my first project in Go, so anything you think could increase the quality of the code would be much appreciated :-D
maybe something like this:
msgs, err := ch.Consume(...)
/* handle error */
stringCh := make(chan string)
done := make(chan struct{})
go func() {
defer con.Close()
defer ch.Close()
defer close(stringCh)
for {
select {
case currMsg := <-msgs:
stringCh <- string(currMsg.Body)
case <-done:
return
}
}
}()
return stringCh, done
This is only a sketch. The basic idea is to spawn another goroutine that listens on the message channel returned by Consume. Other details, such as graceful shutdown and the dequeue interface, depend on your needs.
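Fleshed out a little (still a sketch: it reuses the question's openConnection and ensureQueueExists helpers, and closing the done channel is the caller's responsibility), dequeue could return the string channel like this:
// dequeue opens its own connection and returns a receive-only string channel
// plus a done channel the caller closes to stop consuming and release the
// connection.
func dequeue(queueName string) (<-chan string, chan<- struct{}, error) {
	con, ch, err := openConnection()
	if err != nil {
		return nil, nil, err
	}
	if _, err := ensureQueueExists(queueName, ch); err != nil {
		ch.Close()
		con.Close()
		return nil, nil, err
	}
	msgs, err := ch.Consume(queueName, "", true, false, false, false, nil)
	if err != nil {
		ch.Close()
		con.Close()
		return nil, nil, err
	}
	stringCh := make(chan string)
	done := make(chan struct{})
	go func() {
		// Closing con and ch here, instead of deferring them in dequeue
		// itself, keeps the connection alive while the loop runs.
		defer con.Close()
		defer ch.Close()
		defer close(stringCh)
		for {
			select {
			case msg, ok := <-msgs:
				if !ok {
					return
				}
				stringCh <- string(msg.Body)
			case <-done:
				return
			}
		}
	}()
	return stringCh, done, nil
}
This also answers the second question: the defer con.Close() and defer ch.Close() calls move into the goroutine, so the connection stays open for as long as messages are being delivered.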
After reading @YSTai's response, I realised I was missing the goroutine creation. This is how my code ended up.
worker.go
package main
import (
"../domain"
"../externalservices"
"log"
"strings"
"sync"
)
/*
StartWorker initializes a program that will wait for messages enqueued and process them
*/
func StartWorker() {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
catalogReceived := make(chan domain.Catalog)
defer close(catalogReceived)
for true {
go func() {
externalservices.DequeueMessageCatalog(catalogReceived)
catalog := <-catalogReceived
website := domain.Website{
Name: strings.ToUpper(catalog.Name),
Zip: catalog.Zip}
externalservices.InsertWebSite(&website)
}()
}
}()
log.Printf(" [*] Waiting for messages")
wg.Wait()
}
func main() {
StartWorker()
}
queue.go
package externalservices
import (
"../domain"
"encoding/json"
"github.com/streadway/amqp"
"os"
)
const (
catalogQueue = "catalog-queue"
)
func EnqueueMessageCatalog(catalog *domain.Catalog) error {
marshal, err := json.Marshal(*catalog)
if err != nil {
return err
}
jsonVal := string(marshal)
err = enqueue(catalogQueue, jsonVal)
return err
}
// DequeueMessageCatalog is nice
func DequeueMessageCatalog(messageChannel chan domain.Catalog) {
message := make(chan []byte)
defer close(message)
for true {
go func() {
dequeue(catalogQueue, message)
}()
currCatalog := domain.Catalog{}
json.Unmarshal([]byte(<-message), &currCatalog)
messageChannel <- currCatalog
}
}
func openConnection() (*amqp.Connection, *amqp.Channel, error) {
connString := os.Getenv("RabbitMQConStr")
conn, err := amqp.Dial(connString)
if err != nil {
return nil, nil, err
}
ch, err := conn.Channel()
if err != nil {
conn.Close()
return nil, nil, err
}
return conn, ch, nil
}
func ensureQueueExists(queueName string, ch *amqp.Channel) (amqp.Queue, error) {
q, err := ch.QueueDeclare(
queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
return q, err
}
func enqueue(queueName string, message string) error {
con, ch, err := openConnection()
if err != nil {
return err
}
defer con.Close()
defer ch.Close()
q, err := ensureQueueExists(queueName, ch)
if err != nil {
return err
}
err = ch.Publish(
"", // exchange
q.Name, // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/json",
Body: []byte(message),
})
return err
}
func dequeue(queueName string, message chan []byte) error {
con, ch, err := openConnection()
if err != nil {
return err
}
defer con.Close()
defer ch.Close()
q, err := ensureQueueExists(queueName, ch)
if err != nil {
return err
}
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
true, // auto-ack
false, // exclusive
false, // no-local
true, // no-wait
nil, // args
)
if err != nil {
return err
}
for currMsg := range msgs {
message <- currMsg.Body
}
return nil
}
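One design note, shown as a sketch under the assumption that DequeueMessageCatalog keeps feeding the channel it is given: the worker can range over that channel directly, which avoids the unbounded for true { go func() { ... }() } loop in StartWorker above.
package main

import (
	"log"
	"strings"

	"../domain"
	"../externalservices"
)

// StartWorker runs a single feeder goroutine and processes catalogs as they
// arrive, instead of spawning new goroutines in a busy loop.
func StartWorker() {
	catalogReceived := make(chan domain.Catalog)
	// DequeueMessageCatalog loops forever and writes every decoded catalog
	// onto the channel.
	go externalservices.DequeueMessageCatalog(catalogReceived)

	log.Printf(" [*] Waiting for messages")
	for catalog := range catalogReceived {
		website := domain.Website{
			Name: strings.ToUpper(catalog.Name),
			Zip:  catalog.Zip,
		}
		externalservices.InsertWebSite(&website)
	}
}

func main() {
	StartWorker()
}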

Golang amqp reconnect

I want to test reconnecting to the RabbitMQ server.
I wrote a small script to test it:
http://play.golang.org/p/l3ZWzG0Qqb
But it's not working.
At iteration 10, I close the channel and the connection, open them again, and re-create the chan amqp.Confirmation (line 75). Then the loop continues.
But after that, nothing comes back from the confirms channel.
UPD: code here.
package main
import (
"fmt"
"github.com/streadway/amqp"
"log"
"os"
"time"
)
const SERVER = "amqp://user:pass#localhost:5672/"
const EXCHANGE_NAME = "publisher.test.1"
const EXCHANGE_TYPE = "direct"
const ROUTING_KEY = "publisher.test"
var Connection *amqp.Connection
var Channel *amqp.Channel
func setup(url string) (*amqp.Connection, *amqp.Channel, error) {
conn, err := amqp.Dial(url)
if err != nil {
return nil, nil, err
}
ch, err := conn.Channel()
if err != nil {
return nil, nil, err
}
return conn, ch, nil
}
func main() {
url := SERVER
Connection, Channel, err := setup(url)
if err != nil {
fmt.Println("err publisher setup:", err)
return
}
confirms := Channel.NotifyPublish(make(chan amqp.Confirmation, 1))
if err := Channel.Confirm(false); err != nil {
log.Fatalf("confirm.select destination: %s", err)
}
for i := 1; i <= 3000000; i++ {
log.Println(i)
if err != nil {
fmt.Println("err consume:", err)
return
}
if err := Channel.Publish(EXCHANGE_NAME, ROUTING_KEY, false, false, amqp.Publishing{
Body: []byte(fmt.Sprintf("%d", i)),
}); err != nil {
fmt.Println("err publish:", err)
log.Printf("%+v", err)
os.Exit(1)
return
}
// only ack the source delivery when the destination acks the publishing
confirmed := <-confirms
if confirmed.Ack {
log.Printf("confirmed delivery with delivery tag: %d", confirmed.DeliveryTag)
} else {
log.Printf("failed delivery of delivery tag: %d", confirmed.DeliveryTag)
// TODO. Reconnect will be here
}
if i == 10 {
Channel.Close()
Connection.Close()
while := true
for while {
log.Println("while")
time.Sleep(time.Second * 1)
Connection, Channel, err = setup(url)
if err == nil {
while = false
confirms = Channel.NotifyPublish(make(chan amqp.Confirmation, 1))
log.Printf("%+v", confirms)
}
}
}
time.Sleep(time.Millisecond * 300)
}
os.Exit(1)
}
You should put the channel in confirm mode by calling the Channel.Confirm() method.
After closing the connection, and even after getting a new channel on the same connection, you should call Confirm() again, since the new channel is different from the old one, and by default a new channel does not send confirms.
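A small sketch of that advice, reusing the question's setup helper (error handling kept minimal): after reconnecting, the new channel has to be put back into confirm mode and NotifyPublish has to be registered again.
// reconnect re-dials and returns a channel that is already in confirm mode,
// together with a fresh confirmation listener.
func reconnect(url string) (*amqp.Connection, *amqp.Channel, chan amqp.Confirmation, error) {
	conn, ch, err := setup(url)
	if err != nil {
		return nil, nil, nil, err
	}
	// Confirm mode does not carry over from the old channel; every new
	// channel starts with confirms disabled.
	if err := ch.Confirm(false); err != nil {
		ch.Close()
		conn.Close()
		return nil, nil, nil, err
	}
	confirms := ch.NotifyPublish(make(chan amqp.Confirmation, 1))
	return conn, ch, confirms, nil
}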

Could one connection support multiple channels in the Go API for RabbitMQ?

package main
import (
"fmt"
"github.com/streadway/amqp"
"time"
)
// Every connection should declare the topology they expect
func setup(url, queue string) (*amqp.Connection, *amqp.Channel, error) {
//setup connection
conn, err := amqp.Dial(url)
if err != nil {
return nil, nil, err
}
//build channel in the connection
ch, err := conn.Channel()
if err != nil {
return nil, nil, err
}
//queue declare
if _, err := ch.QueueDeclare(queue, false, true, false, false, nil); err != nil {
return nil, nil, err
}
return conn, ch, nil
}
func main() {
//amqp url
url := "amqp://guest:guest#127.0.0.1:5672";
for i := 1; i <= 2; i++ {
fmt.Println("connect ", i)
//two goroutine
go func() {
//queue name
queue := fmt.Sprintf("example.reconnect.%d", i)
//setup channel in the tcp connection
_, pub, err := setup(url, queue)
if err != nil {
fmt.Println("err publisher setup:", err)
return
}
// Purge the queue from the publisher side to establish initial state
if _, err := pub.QueuePurge(queue, false); err != nil {
fmt.Println("err purge:", err)
return
}
//publish msg
if err := pub.Publish("", queue, false, false, amqp.Publishing{
Body: []byte(fmt.Sprintf("%d", i)),
}); err != nil {
fmt.Println("err publish:", err)
return
}
//keep running
for{
time.Sleep(time.Second * 20)
}
}()
}
//keep running
for {
time.Sleep(time.Second * 20)
}
}
I thought there would be only one connection between the program and the MQ server, but there are two connections. Can one connection only support one channel? Why can't the two goroutines share the same TCP connection?
In theory a socket descriptor can be shared by all threads of a process.
Why do the two goroutines not share one socket, each having its own channel instead?
The model I expected:
The real model in RabbitMQ:
Looking at the source for the library, it appears as though you can call conn.Channel() as many times as you like, and it creates a new stream of communication over the same connection.
OK, I tried it; here's a working example... one goroutine, one connection, two channels.
I set up the receiver, then send a message, then read from the receiver channel.
If you wanted multiple queues bound in one goroutine, you would call rec.Consume twice and then select across the queues (a sketch of that appears after the output below).
package main
import (
"fmt"
"github.com/streadway/amqp"
"os"
)
func main() {
conn, err := amqp.Dial("amqp://localhost")
e(err)
defer conn.Close()
fmt.Println("Connected")
rec, err := conn.Channel()
e(err)
fmt.Println("Setup receiver")
rq, err := rec.QueueDeclare("go-test", false, false, false, false, nil)
e(err)
msgs, err := rec.Consume(rq.Name, "", true, false, false, false, nil)
e(err)
fmt.Println("Setup sender")
send, err := conn.Channel()
e(err)
sq, err := send.QueueDeclare("go-test", false, false, false, false, nil)
e(err)
fmt.Println("Send message")
err = send.Publish("", sq.Name, false, false, amqp.Publishing{
ContentType: "text/plain",
Body: []byte("This is a test"),
})
e(err)
msg := <-msgs
fmt.Println("Received from:", rq, "msg:", string(msg.Body))
}
func e(err error) {
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
Output on my box:
$ go run rmq.go
Connected
Setup receiver
Setup sender
Send message
Received from: {go-test 0 0} msg: This is a test
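And a sketch of that "select across the queues" variant, reusing rec and the e helper from the example above (the queue names here are made up):
qa, err := rec.QueueDeclare("go-test-a", false, false, false, false, nil)
e(err)
qb, err := rec.QueueDeclare("go-test-b", false, false, false, false, nil)
e(err)
msgsA, err := rec.Consume(qa.Name, "", true, false, false, false, nil)
e(err)
msgsB, err := rec.Consume(qb.Name, "", true, false, false, false, nil)
e(err)
// One goroutine draining two consumers that share the same channel.
for {
	select {
	case msg := <-msgsA:
		fmt.Println(qa.Name+":", string(msg.Body))
	case msg := <-msgsB:
		fmt.Println(qb.Name+":", string(msg.Body))
	}
}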
