I am creating Go REST APIs. We are using an AWS server.
I want to send push notifications to mobile devices, so I used
https://pkg.go.dev/github.com/robfig/cron (https://github.com/robfig/cron)
to create a cron job.
We use two API versions, v1 (the old one) and v1.1 (the new one),
and we have more than one environment: dev, QA, preproduction, and production.
In our Go code I created a cron job that sends the push notification to mobile, and the function is called inside main().
But we are getting two notifications each interval, and I don't understand why two arrive at a time.
I am attaching my code.
const title = "This Week’s Activity"

func NotifyWeeklyActivity(db *sql.DB, logger *zap.Logger) {
    logger.Info("NotifyWeeklyActivity - start")
    c := cron.New()
    c.AddFunc("*/5 * * * *", func() {
        lastweekTime := CurrentUTC().AddDate(0, 0, -7)
        type PostCount struct {
            HiveID               uint64      `json:"hive_id"`
            Post                 uint64      `json:"post"`
            NotificationTopicArn null.String `json:"notification_topic_arn"`
        }
        var posts []PostCount
        err := queries.Raw(`
            select count(post_id) as post, post.hive_id as hive_id, hive.notification_topic_arn
            from post
            join hive on post.hive_id = hive.hive_id and hive.deleted_at is null
            where post.deleted_at is null
              and hive.deleted_at is null
              and post.created_at between ? and ?
            group by hive_id
            having count(post_id) > 3;
        `, lastweekTime, CurrentUTC()).Bind(context.TODO(), db, &posts)
        if err != nil {
            logger.Error("error while fetching data", zap.Error(err))
            return
        }
        cfg, _ := config.GetImpart()
        if cfg.Env != config.Local {
            notification := NewImpartNotificationService(db, string(cfg.Env), cfg.Region, cfg.IOSNotificationARN, logger)
            logger.Info("Notification - fetching completed")
            for _, hive := range posts {
                pushNotification := Alert{
                    Title: aws.String(title),
                    Body: aws.String(
                        fmt.Sprintf("Check out %d new posts in your Hive this week", hive.Post),
                    ),
                }
                additionalData := NotificationData{
                    EventDatetime: CurrentUTC(),
                    HiveID:        hive.HiveID,
                }
                logger.Info("Notification",
                    zap.Any("pushNotification", pushNotification),
                    zap.Any("additionalData", additionalData),
                    zap.Any("hive", hive),
                )
                err = notification.NotifyTopic(context.Background(), additionalData, pushNotification, hive.NotificationTopicArn.String)
                if err != nil {
                    logger.Error("error sending notification to topic", zap.Error(err))
                }
            }
        }
    })
    c.Start()
}
func NewImpartNotificationService(db *sql.DB, stage, region, platformApplicationARN string, logger *zap.Logger) NotificationService {
    // SNS not available in us-east-2
    if strings.EqualFold(region, "us-east-2") {
        region = "us-east-1"
    }
    sess, err := session.NewSession(&aws.Config{
        Region:     aws.String(region),
        HTTPClient: NewHttpClient(10 * time.Second),
    })
    if err != nil {
        logger.Fatal("unable to create aws session", zap.Error(err))
    }
    snsAppleNotificationService := &snsAppleNotificationService{
        stage:                  stage,
        Logger:                 logger,
        SNS:                    sns.New(sess),
        platformApplicationARN: platformApplicationARN,
        db:                     db,
    }
    logger.Debug("created new NotificationService",
        zap.String("stage", stage),
        zap.String("arn", platformApplicationARN))
    return snsAppleNotificationService
}
Why am I getting two notifications at a time? How can I solve this?
func (ns *snsAppleNotificationService) NotifyTopic(ctx context.Context, data NotificationData, alert Alert, topicARN string) error {
    var b []byte
    var err error
    if strings.TrimSpace(topicARN) == "" {
        return nil
    }
    ns.Logger.Debug("sending push notification",
        zap.Any("data", data),
        zap.Any("msg", alert),
        zap.String("platformEndpoint", topicARN),
        zap.String("arn", ns.platformApplicationARN))
    if b, err = json.Marshal(apnsMessageWrapper{
        APNSData: APNSMessage{
            Alert: alert,
            Sound: aws.String("default"),
            Data:  data,
            Badge: aws.Int(0),
        },
    }); err != nil {
        return err
    }
    msg := awsSNSMessage{Default: *alert.Body}
    msg.APNS = string(b)
    msg.APNSSandbox = string(b)
    if b, err = json.Marshal(msg); err != nil {
        return err
    }
    input := &sns.PublishInput{
        Message:          aws.String(string(b)),
        MessageStructure: aws.String("json"),
        TopicArn:         aws.String(topicARN),
    }
    _, err = ns.Publish(input)
    if err != nil {
        ns.Logger.Error("push-notification: error after publish",
            zap.Any("topicARN", topicARN),
            zap.Error(err),
        )
    }
    return err
}
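For context, with MessageStructure set to "json", SNS expects one JSON document keyed by delivery protocol, and each endpoint receives only the entry matching its platform application. Assuming the awsSNSMessage struct tags map Default, APNS, and APNSSandbox to the conventional keys (an assumption; the struct definition is not shown above), the message NotifyTopic publishes looks roughly like:

{
    "default": "Check out 4 new posts in your Hive this week",
    "APNS": "{\"aps\":{\"alert\":{...},\"sound\":\"default\",\"badge\":0}}",
    "APNS_SANDBOX": "{\"aps\":{...}}"
}

Since SNS picks exactly one key per endpoint, having both APNS and APNS_SANDBOX populated does not by itself produce two deliveries to one device.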
The main function:
func main() {
    logger, err := zap.NewProduction()
    if err != nil {
        log.Fatal(err)
    }
    cfg, err := config.GetImpart()
    if err != nil {
        logger.Fatal("error parsing config", zap.Error(err))
    }
    if cfg == nil {
        logger.Fatal("nil config")
        return
    }
    if cfg.Debug {
        gin.SetMode(gin.DebugMode)
        //boil.DebugMode = true
        boil.WithDebugWriter(context.TODO(), &config.ZapBoilWriter{Logger: logger})
        logger, _ = zap.NewDevelopment()
        if cfg.Env == config.Local || cfg.Env == config.Development {
            logger.Debug("config startup", zap.Any("config", *cfg))
        }
    } else {
        gin.SetMode(gin.ReleaseMode)
    }
    // init the sentry logger
    logger, err = impart.InitSentryLogger(cfg, logger, cfg.Debug)
    if err != nil {
        logger.Error("error on sentry init", zap.Any("error", err))
    }
    migrationDB, err := cfg.GetMigrationDBConnection()
    if err != nil {
        logger.Fatal("unable to connect to DB", zap.Error(err))
    }
    // Trap sigterm during migrations
    migrationsDoneChan := make(chan bool)
    shutdownMigrationsChan := make(chan bool)
    sigc := make(chan os.Signal, 1)
    signal.Notify(sigc,
        syscall.SIGINT,
        syscall.SIGTERM,
        syscall.SIGQUIT)
    go func() {
        select {
        case <-sigc:
            logger.Info("received a shutdown request during migrations, sending shutdown signal")
            shutdownMigrationsChan <- true
        case <-migrationsDoneChan:
            logger.Info("migrations complete, no longer waiting for sig int")
            return
        }
    }()
    err = migrater.RunMigrationsUp(migrationDB, cfg.MigrationsPath, logger, shutdownMigrationsChan)
    if err != nil {
        logger.Fatal("error running migrations", zap.Error(err))
    }
    migrationsDoneChan <- true
    if err := migrationDB.Close(); err != nil {
        logger.Fatal("error closing migrations DB connection", zap.Error(err))
    }
    boil.SetLocation(time.UTC)
    db, err := cfg.GetDBConnection()
    if err != nil {
        logger.Fatal("unable to connect to DB", zap.Error(err))
    }
    defer db.Close()
    defer logger.Sync()
    // if err := migrater.BootStrapAdminUsers(db, cfg.Env, logger); err != nil {
    //     logger.Fatal("unable to bootstrap user", zap.Error(err))
    // }
    // if err := migrater.BootStrapTopicHive(db, cfg.Env, logger); err != nil {
    //     logger.Fatal("unable to bootstrap user", zap.Error(err))
    // }
    // initiate global profanity detector
    impart.InitProfanityDetector(db, logger)
    impart.NotifyWeeklyActivity(db, logger)
    services := setupServices(cfg, db, logger)
    r := gin.New()
    r.Use(CORS)
    r.Use(secure.Secure(secure.Options{
        //AllowedHosts: []string{"*"},
        //AllowedHosts: []string{"localhost:3000", "ssl.example.com"},
        //SSLRedirect:  true,
        //SSLHost:      "*",
        SSLProxyHeaders:       map[string]string{"X-Forwarded-Proto": "https"},
        STSIncludeSubdomains:  true,
        FrameDeny:             true,
        ContentTypeNosniff:    true,
        BrowserXssFilter:      true,
        ContentSecurityPolicy: "default-src 'self'",
    }))
    r.RedirectTrailingSlash = true
    r.Use(ginzap.RecoveryWithZap(logger, true))      // panics don't stop the server
    r.Use(ginzap.Ginzap(logger, time.RFC3339, true)) // logs all requests
    r.NoRoute(noRouteFunc)
    r.GET("/ping", func(ctx *gin.Context) {
        _, err := dbmodels.Pings(dbmodels.PingWhere.Ok.EQ(true)).One(ctx, db)
        if err != nil {
            ctx.AbortWithStatus(http.StatusInternalServerError)
            return
        }
        ctx.String(http.StatusOK, "pong")
    })
    var v1Route string
    var v2Route string
    if cfg.Env == config.Production || cfg.Env == config.Local {
        v1Route = "v1"
        v2Route = "v1.1"
    } else {
        v1Route = fmt.Sprintf("%s/v1", cfg.Env)
        v2Route = fmt.Sprintf("%s/v1.1", cfg.Env)
    }
    err = mailchimp.SetKey(impart.MailChimpApiKey)
    if err != nil {
        logger.Info("Error connecting Mailchimp", zap.Error(err),
            zap.Any("MailchimpApikey", cfg.MailchimpApikey))
    }
    v1 := r.Group(v1Route)
    setRouter(v1, services, logger, db)
    v2 := r.Group(v2Route)
    setRouter(v2, services, logger, db)
    server := cfg.GetHttpServer()
    server.Handler = r
    logger.Info("Impart backend started.", zap.Int("port", cfg.Port), zap.String("env", string(cfg.Env)))
    if err := graceful.Graceful(server.ListenAndServe, server.Shutdown); err != nil {
        logger.Fatal("error serving", zap.Error(err))
    }
    logger.Info("done serving")
}
The Publish function:
// See also, https://docs.aws.amazon.com/goto/WebAPI/sns-2010-03-31/Publish
func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) {
    req, out := c.PublishRequest(input)
    return out, req.Send()
}
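Since the question is about duplicate deliveries: nothing in the snippets above registers the job twice, so the usual suspects are the process running more than once (for example two deployed instances, or two environments publishing to the same SNS topics) or NotifyWeeklyActivity being called from more than one place. As a minimal sketch, a per-process guard rules out double registration inside one binary (StartWeeklyActivityCron is a name invented here, not part of the codebase):

var weeklyCronOnce sync.Once

// StartWeeklyActivityCron registers the weekly-activity cron at most once
// per process, no matter how many times it is called.
func StartWeeklyActivityCron(db *sql.DB, logger *zap.Logger) {
    weeklyCronOnce.Do(func() {
        NotifyWeeklyActivity(db, logger)
    })
}

If several instances of the binary are running, a per-process guard is not enough: each instance starts its own scheduler, so the job has to be pinned to a single instance or serialized with a distributed lock.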
I have an API that scans for drivers' locations and sends them over a WebSocket every second. The issue is that the loop cannot be escaped when the client disconnects; it seems to stay alive forever. I am using Gin with the nhooyr websocket library.
var GetDriverLocations = func(c *gin.Context) {
    wsoptions := websocket.AcceptOptions{InsecureSkipVerify: true}
    wsconn, err := websocket.Accept(c.Writer, c.Request, &wsoptions)
    if err != nil {
        return
    }
    defer wsconn.Close(websocket.StatusInternalError, "the sky is falling")
    driverLocation := &models.DriverLocation{}
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
        case <-c.Request.Context().Done():
            fmt.Println("done") // this never gets printed
            return
        }
        coords, err := driverLocation.GetDrivers()
        if err != nil {
            break
        }
        err = wsjson.Write(c.Request.Context(), wsconn, &coords)
        if websocket.CloseStatus(err) == websocket.StatusNormalClosure {
            break
        }
        if err != nil {
            break
        }
    }
    fmt.Println("conn ended") // this never gets printed
}
I also tried this loop, but it has the same issue:
for range ticker.C {
    coords, err := driverLocation.GetDrivers()
    if err != nil {
        break
    }
    err = wsjson.Write(c.Request.Context(), wsconn, &coords)
    if websocket.CloseStatus(err) == websocket.StatusNormalClosure {
        break
    }
    if err != nil {
        break
    }
}
Because the network connection is hijacked from the net/http server by the nhooyr websocket library, c.Request.Context() is not canceled until the handler returns.
Call CloseRead to get a context that is canceled when the connection is closed, and use that context in the loop:
var GetDriverLocations = func(c *gin.Context) {
    wsoptions := websocket.AcceptOptions{InsecureSkipVerify: true}
    wsconn, err := websocket.Accept(c.Writer, c.Request, &wsoptions)
    if err != nil {
        return
    }
    defer wsconn.Close(websocket.StatusInternalError, "")
    // CloseRead returns a context that is canceled when the connection closes.
    ctx := wsconn.CloseRead(c.Request.Context())
    driverLocation := &models.DriverLocation{}
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
        case <-ctx.Done():
            return
        }
        coords, err := driverLocation.GetDrivers()
        if err != nil {
            break
        }
        // Write with the CloseRead context so a pending write is also
        // aborted once the connection closes.
        if err := wsjson.Write(ctx, wsconn, &coords); err != nil {
            break
        }
    }
}
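Note that CloseRead also starts a goroutine that reads and discards incoming frames, and it closes the connection if the client sends a data message, so it only fits write-only handlers like this one. If the handler also needs to read from the client, use a normal read loop instead and cancel the context yourself.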
I have this code (I use https://github.com/fiorix/go-smpp):
// -----------------------------------------------
// handleConnection new clients.
// -----------------------------------------------
func (_srv *ServerSmpp) handleConnection(_cfg *ConfigSmpp, c *conn) {
    defer c.Close()
    if err := _srv.auth(_cfg, c); err != nil {
        if err != io.EOF {
            log.Printf("smpp_server: server auth failed: %s\n", err)
        }
        return
    }
    notify := make(chan error)
    go func() {
        for {
            pb, err := c.Read()
            if err != nil {
                notify <- err
                return
            }
            err = _srv.Handler(_srv.RemoteProvider, c, pb)
            if err != nil {
                fmt.Printf("%s\n", err)
                notify <- err
                return
            }
        }
    }()
    for {
        select {
        case err := <-notify:
            if io.EOF == err {
                fmt.Printf("Smpp server (read): %s\n", err)
                return
            }
        case <-time.After(time.Second * 10):
            fmt.Printf("Client disconnected by timeout.\n")
            return
        }
    }
}
The code that invokes handleConnection:
func (_srv *ServerSmpp) Serve(_cfg *ConfigSmpp) {
    for {
        client, err := _srv.NetListener.Accept()
        if err != nil {
            break
        }
        c := newConn(client)
        go _srv.handleConnection(_cfg, c)
    }
}
With this code the server disconnects every client after the 10-second timeout, but how can I disconnect only the clients that have been inactive for 10 seconds?
Your client object seems to be a net.Conn, so call client.SetReadDeadline() with an appropriate time.Time value before blocking on client.Read():

c.client.SetReadDeadline(time.Now().Add(10 * time.Second))
pb, err := c.Read()
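Putting that into the read goroutine of handleConnection, a minimal sketch (assuming the conn type wraps the underlying net.Conn in a field named client, which is a guess about these internals) refreshes the deadline before every blocking read, so only a client that stays silent for 10 seconds is dropped:

go func() {
    for {
        // Reset the deadline before each blocking read: the client is
        // dropped after 10 seconds without traffic, not 10 seconds after
        // connecting.
        if err := c.client.SetReadDeadline(time.Now().Add(10 * time.Second)); err != nil {
            notify <- err
            return
        }
        pb, err := c.Read()
        if err != nil {
            // A timeout surfaces here as a net.Error with Timeout() == true.
            notify <- err
            return
        }
        if err := _srv.Handler(_srv.RemoteProvider, c, pb); err != nil {
            fmt.Printf("%s\n", err)
            notify <- err
            return
        }
    }
}()

With this in place, the case <-time.After(time.Second * 10) branch in handleConnection can be removed, since the idle timeout now arrives through the notify channel.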
I use high-level helpers (gobwas/ws and wsutil) to create a WebSocket server.
func wsHandler(w http.ResponseWriter, r *http.Request) {
    re := regexp.MustCompile(`^\/ws\/([0-9a-zA-Z]+)\/*`)
    match := re.FindStringSubmatch(r.URL.Path)
    if len(match) != 2 {
        http.NotFound(w, r)
        return
    }
    conn, _, _, err := ws.UpgradeHTTP(r, w)
    if err != nil {
        log.Printf("gobwas/ws: %s", err)
    }
    go func() {
        defer conn.Close()
        channels.Lock()
        channels.Channels[match[1]] = append(channels.Channels[match[1]], &conn)
        channels.Unlock()
        for {
            msg, op, err := wsutil.ReadClientData(conn)
            if err != nil {
                if err != io.EOF {
                    log.Printf("gobwas/ws/wsutil: %s", err)
                }
                break
            }
            if len(msg) > 0 {
                go sendMessage(&conn, op, match[1], msg)
            }
        }
        deleteConn(&conn, match[1])
    }()
}
Then, when I implement a graceful disconnection, a plain conn.Close() simply drops the connection outside the protocol. I use the following code to close:
func onShutdown() {
    channels.RLock()
    for _, channel := range channels.Channels {
        for _, conn := range channel {
            if err := ws.WriteFrame(*conn, ws.NewCloseFrame(ws.NewCloseFrameBody(ws.StatusNormalClosure, "Server shutdown"))); err != nil {
                fmt.Println(err)
            }
            (*conn).Close()
        }
    }
    channels.RUnlock()
}
And even if I wait for a response from the client before closing the connection, the client still registers the disconnect as unclean. (The client is a browser using the JS WebSocket API.)
Sample JS code:
var socket = new WebSocket(`${location.protocol === "https:" ? "wss:" : "ws:"}//${window.location.host}/ws/valid`);
socket.onclose = function (event) {
    if (event.wasClean) {
        console.log('Clean');
    } else {
        console.log('disconnect');
    }
    console.log('Code: ' + event.code + ' reason: ' + event.reason);
};
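For reference, a clean WebSocket shutdown is a two-way handshake (RFC 6455): the server sends a close frame and waits for the client to echo a close frame before dropping the TCP connection; tearing down the socket before the echo arrives can be reported by browsers as an unclean disconnect. A minimal sketch with gobwas/ws, assuming the per-connection read loop above has already exited for this conn (two concurrent readers on one connection would race):

func closeGracefully(conn net.Conn) {
    frame := ws.NewCloseFrame(ws.NewCloseFrameBody(ws.StatusNormalClosure, "Server shutdown"))
    if err := ws.WriteFrame(conn, frame); err != nil {
        fmt.Println(err)
    }
    // Give the client a moment to echo the close frame so the browser
    // sees event.wasClean == true instead of a dropped socket.
    conn.SetReadDeadline(time.Now().Add(2 * time.Second))
    _, _ = ws.ReadFrame(conn) // only waiting for the echo; contents are ignored
    conn.Close()
}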
I am facing a very weird problem here. I started a Kafka consumer in Go, using the sarama-cluster library, to consume messages from a Kafka topic, but the messages are not being received by that consumer.
The weird part: if I start another consumer in parallel, messages are suddenly delivered to both consumers.
I cannot think of a logical explanation, so any pointers will be appreciated.
Note: this problem started after the Kafka and ZooKeeper servers were restarted non-gracefully.
Here is the Go code of the consumer that is not receiving messages:
if err := consumer.Start(); err != nil {
    return err
}
updChan, err := consumer.Consume()
if err != nil {
    return err
}
go func() {
    for {
        select {
        case msg, ok := <-updChan:
            if !ok {
                return
            }
            var message liveupdater.KafkaMessage
            err := json.Unmarshal(msg.Msg, &message)
            if err != nil {
                fmt.Println(err)
            }
            err = handleMessaege(message)
            if err != nil {
                logrus.Println("encountered error: " + err.Error())
            }
            consumer.MarkProcessed(msg, string(message.Type))
        }
    }
}()
And here is the Go code where the consumer does receive messages (the only difference from the previous code is another consumer running in parallel on the same topic):
consumeMessages(config)
if err := consumer.Start(); err != nil {
    return err
}
updChan, err := consumer.Consume()
if err != nil {
    return err
}
go func() {
    for {
        select {
        case msg, ok := <-updChan:
            if !ok {
                return
            }
            var message liveupdater.KafkaMessage
            err := json.Unmarshal(msg.Msg, &message)
            if err != nil {
                fmt.Println(err)
            }
            err = handleMessaege(message)
            if err != nil {
                logrus.Println("encountered error: " + err.Error())
            }
            consumer.MarkProcessed(msg, string(message.Type))
        }
    }
}()
func consumeMessages(config *rakshak_config.Config) {
    kafkaConfig := kafka.Config{Brokers: strings.Split(config.Kafka.Brokers, ",")}
    logrus.Printf("brokers %s", config.Kafka.Brokers)
    hermesConsumer, err := hermes.NewConsumer(hermes.Kafka, []string{config.Kafka.Topic}, kafkaConfig)
    if err != nil {
        logrus.Printf("could not get consumer through hermes %s", err)
    }
    err = hermesConsumer.Start()
    if err != nil {
        logrus.Printf("could not start consumer through hermes %s", err)
    }
    conChan, err := hermesConsumer.Consume()
    if err != nil {
        logrus.Printf("not able to start consumer channel %s", err)
    }
    go func() {
        for {
            select {
            case msg, ok := <-conChan:
                if !ok {
                    logrus.Println("could not consume message")
                    return
                }
                logrus.Printf("kafka msg string: %s", string(msg.Msg[:]))
                hermesConsumer.MarkProcessed(msg, "")
            }
        }
    }()
}
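In case it helps with debugging: a consumer that sits idle until a second one joins usually points at the consumer group's partition assignment, so the first thing to check is which partitions each instance actually claims after a rebalance. Here is a minimal sketch using bsm/sarama-cluster directly (assuming that is the library hermes wraps, which I am not certain of):

import (
    "log"

    "github.com/Shopify/sarama"
    cluster "github.com/bsm/sarama-cluster"
)

func watchAssignments(brokers []string, groupID string, topics []string) error {
    cfg := cluster.NewConfig()
    cfg.Group.Return.Notifications = true              // emit rebalance events
    cfg.Consumer.Offsets.Initial = sarama.OffsetOldest // start from the oldest offset when none is committed

    consumer, err := cluster.NewConsumer(brokers, groupID, topics, cfg)
    if err != nil {
        return err
    }
    defer consumer.Close()

    go func() {
        for ntf := range consumer.Notifications() {
            // Current lists the partitions this instance owns after each rebalance.
            log.Printf("rebalance: claimed=%v released=%v current=%v",
                ntf.Claimed, ntf.Released, ntf.Current)
        }
    }()

    for msg := range consumer.Messages() {
        log.Printf("partition=%d offset=%d value=%s", msg.Partition, msg.Offset, msg.Value)
        consumer.MarkOffset(msg, "")
    }
    return nil
}

If the notifications show no partitions claimed until the second consumer joins, the group coordinator state is the likely casualty of the non-graceful restart.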
Thanks in advance.