So I am creating a log analyzer in Go, and what I need is a real-time tail -f of newly created files.
I am using tail package together with fsnotify package but I'm not very familiar with channels and routines in go so I need some assistance.
Currently program looks like this:
package main
import(
"fmt"
"github.com/hpcloud/tail"
"strings"
"log"
"net/smtp"
"time"
"github.com/fsnotify/fsnotify"
)
// newFileCheck watches the "myDir" directory with fsnotify and streams the
// names of files that trigger events on the returned channel.
//
// NOTE(review): newFilename is unbuffered, so the event goroutine blocks on
// the send until a consumer receives — if the consumer is busy (e.g. stuck in
// a tail loop) no further events are ever drained. The watcher is also never
// Closed, and the `case err :=` below shadows the named return value err.
func newFileCheck() (newFilename chan string, err error) {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return
}
err = watcher.Add("myDir")
if err != nil {
return
}
newFilename = make(chan string)
// Process events: forward each fsnotify event's file name to the consumer.
go func() {
for {
select {
case ev := <-watcher.Events:
log.Println("event:", ev)
newFilename <- ev.Name // Relative path to the file
//t.Stop() //if I pass reference to t THIS IS NOT HAPPENING ?
case err := <-watcher.Errors:
log.Println("error:", err)
}
}
}()
return
}
// main (from the question): starts the directory watcher and a consumer
// goroutine, then returns immediately — which terminates the program and
// every goroutine with it. That is why events appear to fire only once.
func main() {
newFileName = "mylog_0000.log.txt" // NOTE(review): newFileName is never declared, so this does not compile as-is
fmt.Println("Processing log: ",newFileName)
newFilenameChan, err := newFileCheck()
if err != nil {
fmt.Println("ERR: ",err)
}
t := tailLog(newFileName) // NOTE(review): tailLog ranges over t.Lines and blocks; it only returns after the tail ends
go func() {
for {
select {
case name := <-newFilenameChan:
fmt.Println("New file created: ",name) // observed: printed only once, then nothing on later events
// NOTE(review): none of the calls below has any visible effect here
t.Stop()
t.Dead()
t.Done()
t = tailLog(name)
}
}
}()
}
// tailLog tails /mydir/<fileName> and counts lines containing "mfc"
// (case-insensitive).
//
// NOTE(review): with Follow+ReOpen, the range over t.Lines only ends when the
// tail is stopped, so this function blocks its caller indefinitely — the
// *tail.Tail it returns is only reachable after the tail has already died.
func tailLog(fileName string) *tail.Tail{
var count = 0
// close the old one and read new file
t, err := tail.TailFile("/mydir/"+fileName, tail.Config{Follow: true, ReOpen: true})
for line := range t.Lines {
//fmt.Println("Line is:", line.Text)
// check whether the line contains the marker we care about
if strings.Contains(strings.ToLower(line.Text), "mfc"){
count++
//do other stuff
}
}
fmt.Println(err)
return t
}
So I can't figure out why the newFileCheck function prints an event only the first time, and I'm also not able to figure out how to cancel the active tail when a new event happens, and then start the tail again after that event.
Consider this:
When the function main returns, the program exits. It does not wait
for other (non-main) goroutines to complete.
And look at this block of your code:
// (quoted verbatim from the question to illustrate the point above:
// main returns right after spawning the goroutine, so the program exits
// and takes every goroutine with it.)
func main() {
newFileName = "mylog_0000.log.txt"
fmt.Println("Processing log: ",newFileName)
newFilenameChan, err := newFileCheck()
if err != nil {
fmt.Println("ERR: ",err)
}
t := tailLog(newFileName)
go func() {
for {
select {
case name := <-newFilenameChan:
fmt.Println("New file created: ",name) // this will be printed only once and then on new events nothing is printed
// NOTE: none of the calls below appears to work
t.Stop()
t.Dead()
t.Done()
t = tailLog(name)
}
}
}()
}
To handle this job gracefully, you should learn more about concurrency in Go. Use channels to control goroutines (stopping/starting/etc.) and a WaitGroup if you care about the order in which tasks execute and finish. When there is no control over your program flow, goroutines live their own lives, and that is bad practice.
If you are asking about this package: https://github.com/hpcloud/tail
You need to create one goroutine after you open the file, and then you need to check for a new file.
I make it for checking a new logfile of SoftetherVPN-Server:
package main
import (
"fmt"
"github.com/hpcloud/tail"
"log"
"time"
)
func CheckFileName(filename string, f chan<- string) {
for {
newFilename := fmt.Sprintf("%v", time.Now().Format("vpn_20060102.log"))
// We have a new filename, that send it into channel
if newFilename != filename {
log.Println("A new logfile", newFilename)
f <- newFilename
return
}
}
}
func main() {
filenameChan := make(chan string)
for {
filename = fmt.Sprintf("%v", time.Now().Format("vpn_20060102.log"))
t, err := tail.TailFile(filename, tail.Config{Follow: true, ReOpen: true, MustExist: true})
if err != nil {
log.Println("No log file", filename, ". Waiting for 10 minute")
time.Sleep(time.Minute * 10)
continue
}
// Send goroutine for checking filename in loop
go CheckFileName(filename, filenameChan)
// Send goroutine that stops lines read in range
go func() {
for {
select {
case name := <-filenameChan:
log.Println("Received a new name for log:", name)
t.Stop()
return
}
}
}()
if err != nil {
log.Fatalln(err)
}
for line := range t.Lines {
fmt.Println(line)
}
}
}
So, that works for me
Related
I want to loop through the menu's options. However, it stops at the first option, since the select without "default:" is blocking and it does not know more options will appear dynamically.
Below is the broken code:
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os/exec"
"strings"
"time"
"github.com/getlantern/systray"
"gopkg.in/yaml.v3"
)
var menuItensPtr []*systray.MenuItem
var config map[string]string
var commands []string
// main loads the YAML config into the package-level config map, then hands
// the main thread to systray (onReady is called once the tray is up,
// onExit on shutdown).
func main() {
config = readconfig()
systray.Run(onReady, onExit)
}
// onReady (from the question): builds one menu item per config entry and
// spawns two goroutines — one refreshing title/tooltip, one dispatching
// clicks. The click dispatcher demonstrates the bug being asked about:
// a select with a single case and no default blocks on that one channel,
// so the inner loop gets stuck waiting on the first item's ClickedCh and
// never polls the remaining items.
func onReady() {
systray.SetIcon(getIcon("assets/menu.ico"))
menuItensPtr = make([]*systray.MenuItem,0)
commands = make([]string,0)
for k, v := range config {
menuItemPtr := systray.AddMenuItem(k, k)
menuItensPtr = append(menuItensPtr, menuItemPtr)
commands = append(commands, v)
}
systray.AddSeparator()
// mQuit := systray.AddMenuItem("Quit", "Quits this app")
go func() {
for {
systray.SetTitle("My tray menu")
systray.SetTooltip("https://github.com/evandrojr/my-tray-menu")
time.Sleep(1 * time.Second)
}
}()
go func() {
for{
for i, menuItenPtr := range menuItensPtr {
select {
/// EXECUTION GETS STUCK HERE: single-case select blocks on this channel
case<-menuItenPtr.ClickedCh:
execute(commands[i])
}
}
// select {
// case <-mQuit.ClickedCh:
// systray.Quit()
// return
// // default:
// }
}
}()
}
// onExit runs when systray shuts down; nothing to clean up yet.
func onExit() {
// Cleaning stuff will go here.
}
// getIcon reads the icon file at path s. On failure it prints the error and
// returns whatever ReadFile produced (nil) — best effort, no hard failure.
func getIcon(s string) []byte {
b, err := ioutil.ReadFile(s)
if err != nil {
fmt.Print(err)
}
return b
}
// execute splits the configured command line on single spaces and runs it,
// capturing stdout into a buffer.
// NOTE(review): naive space-splitting breaks quoted arguments, and log.Fatal
// kills the whole tray app if the command fails — confirm that is intended.
func execute(commands string){
command_array:= strings.Split(commands, " ")
command:=""
command, command_array = command_array[0], command_array[1:]
cmd := exec.Command(command, command_array ...)
var out bytes.Buffer
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
log.Fatal(err)
}
// fmt.Printf("Output %s\n", out.String())
}
// readconfig loads my-tray-menu.yaml from the working directory into a
// label -> command map, logging fatally on any read or parse error, and
// echoes each entry for debugging.
func readconfig() map[string]string{
yfile, err := ioutil.ReadFile("my-tray-menu.yaml")
if err != nil {
log.Fatal(err)
}
data := make(map[string]string)
err2 := yaml.Unmarshal(yfile, &data)
if err2 != nil {
log.Fatal(err2)
}
for k, v := range data {
fmt.Printf("%s -> %s\n", k, v)
}
return data
}
Below is the ugly workaround that works:
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/getlantern/systray"
"gopkg.in/yaml.v3"
)
var menuItensPtr []*systray.MenuItem
var config map[string]string
var commands []string
var labels []string
var programPath string
// main resolves the executable's directory first so readconfig and the icon
// loading can use absolute paths, then hands the main thread to systray.
// NOTE(review): the one-second sleep looks like a startup workaround —
// confirm whether it is still needed.
func main() {
setProgramPath()
config = readconfig()
time.Sleep(1 * time.Second)
systray.Run(onReady, onExit)
}
// onReady (working workaround): the menu items are hard-coded as op0..op3 so
// that one select statement can multiplex all of their ClickedCh channels
// plus Quit — that is what unblocks the dispatcher compared to the broken
// version above.
// NOTE(review): labels[i]/commands[i] with i running to 3 panics if the YAML
// config has fewer than four entries.
func onReady() {
systray.SetIcon(getIcon(filepath.Join(programPath,"assets/menu.ico")))
menuItensPtr = make([]*systray.MenuItem, 0)
i := 0
op0 := systray.AddMenuItem(labels[i], commands[i])
i++
op1 := systray.AddMenuItem(labels[i], commands[i])
i++
op2 := systray.AddMenuItem(labels[i], commands[i])
i++
op3 := systray.AddMenuItem(labels[i], commands[i])
i++
systray.AddSeparator()
mQuit := systray.AddMenuItem("Quit", "Quits this app")
go func() {
for {
systray.SetTitle("My tray menu")
systray.SetTooltip("https://github.com/evandrojr/my-tray-menu")
time.Sleep(1 * time.Second)
}
}()
go func() {
for {
select {
// A multi-case select does not get stuck: it waits on all channels at once.
case <-op0.ClickedCh:
execute(commands[0])
case <-op1.ClickedCh:
execute(commands[1])
case <-op2.ClickedCh:
execute(commands[2])
case <-op3.ClickedCh:
execute(commands[3])
case <-mQuit.ClickedCh:
systray.Quit()
return
}
}
}()
}
// onExit runs when systray shuts down; nothing to clean up yet.
func onExit() {
// Cleaning stuff will go here.
}
// getIcon reads the icon file at path s. On failure it prints the error and
// returns whatever ReadFile produced (nil) — best effort, no hard failure.
func getIcon(s string) []byte {
b, err := ioutil.ReadFile(s)
if err != nil {
fmt.Print(err)
}
return b
}
// setProgramPath stores the directory containing the running executable in
// the package-level programPath variable, panicking if the executable path
// cannot be determined (without it no bundled asset can be located).
func setProgramPath() {
	ex, err := os.Executable()
	if err != nil {
		panic(err)
	}
	programPath = filepath.Dir(ex)
	// Bug fix: the original re-checked err here and called os.Exit(1), but
	// err was already handled by the panic above and filepath.Dir cannot
	// fail — that second check was dead code and has been removed.
}
// execute runs a whitespace-separated command line, capturing its stdout and
// printing it afterwards. The process dies (log.Fatal) if the command fails.
func execute(commands string) {
	fields := strings.Split(commands, " ")
	prog, args := fields[0], fields[1:]
	cmd := exec.Command(prog, args...)
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Output %s\n", stdout.String())
}
// readconfig loads my-tray-menu.yaml from the executable's directory and, as
// a side effect, fills the package-level labels and commands slices that
// onReady indexes. Any read or parse error is fatal.
func readconfig() map[string]string {
yfile, err := ioutil.ReadFile(filepath.Join(programPath,"my-tray-menu.yaml"))
if err != nil {
log.Fatal(err)
}
data := make(map[string]string)
err2 := yaml.Unmarshal(yfile, &data)
if err2 != nil {
log.Fatal(err2)
}
labels = make([]string, 0)
commands = make([]string, 0)
for k, v := range data {
labels = append(labels, k)
commands = append(commands, v)
fmt.Printf("%s -> %s\n", k, v)
}
fmt.Print(len(labels))
return data
}
Full source code here:
https://github.com/evandrojr/my-tray-menu
select "chooses which of a set of possible send or receive operations will proceed". The spec sets out how this choice is made:
If one or more of the communications can proceed, a single one that can proceed is chosen via a uniform pseudo-random selection. Otherwise, if there is a default case, that case is chosen. If there is no default case, the "select" statement blocks until at least one of the communications can proceed.
Your working example:
select {
case <-op0.ClickedCh:
execute(commands[0])
case <-op1.ClickedCh:
execute(commands[1])
// ...
}
uses select successfully to choose between one of the offered options. However if you pass a single option e.g.
select {
case<-menuItenPtr.ClickedCh:
execute(commands[i])
}
}
The select will block until <-menuItenPtr.ClickedCh is ready to proceed (e.g. something is received). This is effectively the same as not using a select:
<-menuItenPtr.ClickedCh
execute(commands[i])
The result you were expecting can be achieved by providing a default option:
select {
case <-menuItenPtr.ClickedCh:
    execute(commands[i])
default:
}
As per the quote from the spec above the default option will be chosen if none of the other options can proceed. While this may work it's not a very good solution because you effectively end up with:
for {
// Check if event happened (not blocking)
}
This will tie up CPU time unnecessarily as it continually loops checking for events. A better solution would be to start a goroutine to monitor each channel:
for i, menuItenPtr := range menuItensPtr {
go func(c chan struct{}, cmd string) {
for range c { execute(cmd) }
}(menuItenPtr.ClickedCh, commands[i])
}
// Start another goroutine to handle quit
The above will probably work but does lead to the possibility that execute will be called concurrently (which might cause issues if your code is not threadsafe). One way around this is to use the "fan in" pattern (as suggested by #kostix and in the Rob Pike video suggested by #John); something like:
cmdChan := make(chan string)
for i, menuItenPtr := range menuItensPtr {
go func(c chan struct{}, cmd string) {
for range c { cmdChan <- cmd }
}(menuItenPtr.ClickedCh, commands[i])
}
go func() {
for {
select {
case cmd := <- cmdChan:
execute(cmd) // Handle command
case <-mQuit.ClickedCh:
systray.Quit()
return
}
}
}()
note: all code above entered directly into the question so please treat as pseudo code!
I'm trying to learn Go while writing a small program. The program should parse a path recursively, as efficiently and as fast as possible, and output the full filename (with the path included) and the sha256 file hash of the file.
If the file hashing fails, I want to keep the error and add it to the string (at the hash position).
The result should return a string on the console like:
fileXYZ||hash
Unfortunately, the programs hangs at some point. I guess some of my channels are not closing properly and waiting indefinitely for input. I've been trying for quite some time to fix the problem, but without success.
Does anyone have an idea why the output hangs? Many many thx in advance, any input/advice for a Go newcomer is welcome too ;-).
(I wrote separate functions as I wanna add additional features after having fixed this issue.)
Thanks a lot!
Didier
Here is the code:
import (
"crypto/sha256"
"encoding/hex"
"flag"
"fmt"
"io"
"log"
"os"
"path/filepath"
"time"
)
// main parses the -path flag, hashes the tree underneath it via run, and
// logs the elapsed wall-clock time.
func main() {
	pathParam := flag.String("path", ".", "Enter Filesystem Path to list folders")
	flag.Parse()
	start := time.Now()
	run(*pathParam)
	log.Printf("Time elapsed: %v", time.Since(start))
}
// run wires the pipeline: readfs produces file paths, generateHash consumes
// them, and run prints the resulting "file||hash" lines.
//
// NOTE(review): this is the buggy version from the question — the
// `defer close(chashes)` only fires when run returns, but run cannot return
// until the range over chashes ends, which itself requires chashes to be
// closed first: the program hangs. The accepted answer below removes this
// defer and closes chashes inside generateHash instead.
func run(path string) {
chashes := make(chan string, 50)
cfiles := make(chan string)
go func() {
readfs(path, cfiles)
defer close(cfiles)
}()
go func() {
generateHash(cfiles, chashes)
}()
defer close(chashes)
for hash := range chashes {
fmt.Println(hash)
}
}
func readfs(path string, cfiles chan string) {
files, err := os.ReadDir(path)
if err != nil {
log.Fatalln(err)
}
for _, file := range files {
filename := filepath.Join(path, file.Name())
if file.IsDir() {
readfs(filename, cfiles)
continue
} else {
cfiles <- filename
}
}
}
// generateHash (buggy version from the question): spawns one goroutine per
// incoming file path and sends "path||checksum" on chashes, but it never
// waits for those goroutines and never closes chashes — so the caller's
// range over chashes hangs forever. The corrected version further down adds
// a sync.WaitGroup and closes the channel when all hashers are done.
func generateHash(cfiles chan string, chashes chan string) {
for filename := range cfiles {
go func(filename string) {
var checksum string
var oError bool = false
file, err := os.Open(filename)
if err != nil {
oError = true
errorMsg := "ERROR: " + err.Error()
log.Println(errorMsg)
checksum = errorMsg
}
defer file.Close()
if !oError {
hash := sha256.New()
if _, err := io.Copy(hash, file); err != nil {
errorMsg := "ERROR: " + err.Error()
log.Println(errorMsg)
checksum = errorMsg
}
if len(checksum) == 0 {
checksum = hex.EncodeToString(hash.Sum(nil))
}
}
chashes <- filename + "||" + checksum
}(filename)
} //for files
}
The following loop hangs because chashes is not closed.
for hash := range chashes {
fmt.Println(hash)
}
Fix by closing chashes after all the hashers are completed. Use a sync.WaitGroup to wait for the hashers to complete.
func generateHash(cfiles chan string, chashes chan string) {
var wg sync.WaitGroup
for filename := range cfiles {
wg.Add(1)
go func(filename string) {
defer wg.Done()
var checksum string
var oError bool = false
file, err := os.Open(filename)
if err != nil {
oError = true
errorMsg := "ERROR: " + err.Error()
log.Println(errorMsg)
checksum = errorMsg
}
defer file.Close()
if !oError {
hash := sha256.New()
if _, err := io.Copy(hash, file); err != nil {
errorMsg := "ERROR: " + err.Error()
log.Println(errorMsg)
checksum = errorMsg
}
if len(checksum) == 0 {
checksum = hex.EncodeToString(hash.Sum(nil))
}
}
chashes <- filename + "||" + checksum
}(filename)
} //for files
// Wait for the hashers to complete.
wg.Wait()
// Close the channel to cause main() to break
// out of for range on chashes.
close(chashes)
}
Remove defer close(chashes) from run().
Run an example on the Go playground.
I am building something to monitor a directory for file uploads. Right now I am using a for {} loop to continuously read the directory for testing purposes with the plan to use cron or something in the future to launch my application.
The goal is to monitor an upload directory, ensure files have finished copying, then move the files to another directory for processing. The files themselves range from 15GB to about 50GB and we will be receiving hundreds daily.
This is my first foray into go routines. I am not sure if I am completely misunderstanding go routines, channels and wait groups or something but I had thought that as I loop through a list of files, each file gets processed by a go routine function independently. However when I run the below code it grabs a file but only acknowledges the first file it finds in the directory. I noticed though once the first file finishes other files are acknowledged as completed.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"sync"
"time"
"gopkg.in/yaml.v2"
)
// Config mirrors the uploader's YAML configuration file.
type Config struct {
LogFileName string `yaml:"logfilename"`
LogFilePath string `yaml:"logfilepath"`
UploadRoot string `yaml:"upload_root"`
TPUploadTool string `yaml:"tp_upload_tool"`
}
var wg sync.WaitGroup
const WORKERS = 5
// getConfig opens and decodes the YAML configuration file at fileName.
//
// Bug fix: the original printed the open error to stdout and called
// os.Exit(1), bypassing the function's own error return; now the error is
// wrapped and returned so the caller decides how to fail (main already
// log.Fatals on a non-nil error). The confusing `return conf, err` with a
// stale nil err is also gone.
func getConfig(fileName string) (*Config, error) {
	yamlFile, err := os.Open(fileName)
	if err != nil {
		return nil, fmt.Errorf("reading YAML file: %w", err)
	}
	defer yamlFile.Close()
	conf := &Config{}
	if err := yaml.NewDecoder(yamlFile).Decode(conf); err != nil {
		return nil, err
	}
	return conf, nil
}
// getFileData polls the file's size once per second until it stops growing,
// then reports "Done" on fileStatus and marks the WaitGroup.
//
// NOTE(review): the os.Stat error is discarded with `_`; if the file
// disappears mid-poll, fileToUploadStat is nil and .Size() panics.
// NOTE(review): fileStatus is unbuffered, so the "Done" send blocks until
// main receives it.
func getFileData(fileToUpload string, fileStatus chan string) {
var newSize int64
var currentSize int64
currentSize = 0
newSize = 0
fmt.Printf("Uploading: %s\n", fileToUpload)
fileDone := false
for !fileDone {
fileToUploadStat, _ := os.Stat(fileToUpload)
currentSize = fileToUploadStat.Size()
//fmt.Printf("%s current size is: %d\n", fileToUpload, currentSize)
//fmt.Println("New size ", newSize)
if currentSize != 0 {
if currentSize > newSize {
// Still growing: remember the new high-water mark and poll again.
newSize = currentSize
} else if newSize == currentSize {
// Size unchanged for a full polling interval: treat the copy as finished.
fileStatus <- "Done"
fileDone = true
wg.Done()
}
}
time.Sleep(1 * time.Second)
}
}
// sendToCDS is a stub for the eventual hand-off of a finished file to CDS.
func sendToCDS() {
fmt.Println("Sending To CDS")
}
// main (from the question): scans the upload root in an endless loop and
// launches getFileData per file.
//
// NOTE(review): `status := <-fileStatus` immediately after the `go` call
// blocks until that one goroutine reports "Done", so files are effectively
// processed one at a time — exactly what the answer below explains.
func main() {
fileStatus := make(chan string)
configFileName := flag.String("config", "", "YAML configuration file.\n")
flag.Parse()
if *configFileName == "" {
flag.PrintDefaults()
os.Exit(1)
}
UploaderConfig, err := getConfig(*configFileName)
if err != nil {
log.Fatal("Error reading configuration file.")
}
for {
fmt.Print("Checking for new files..")
uploadFiles, err := ioutil.ReadDir(UploaderConfig.UploadRoot)
if err != nil {
log.Fatal(err)
}
if len(uploadFiles) == 0 {
fmt.Println("..no files to transfer.\n")
}
for _, uploadFile := range uploadFiles {
wg.Add(1)
fmt.Println("...adding", uploadFile.Name())
if err != nil {
log.Fatalln("Unable to read file information.")
}
ff := UploaderConfig.UploadRoot + "/" + uploadFile.Name()
go getFileData(ff, fileStatus)
// NOTE(review): this receive serializes the loop — main waits here for
// the goroutine just started before looking at the next file.
status := <-fileStatus
if status == "Done" {
fmt.Printf("%s is done.\n", uploadFile.Name())
os.Remove(ff)
}
}
wg.Wait()
}
}
I had thought about using channels for a thread safe queueing mechanism that loads up with the files in the directory and then the files get picked up by workers. I have done similar things in Python.
Because of the following lines, the code is processing each file sequentially:
go getFileData(ff, fileStatus)
status := <-fileStatus
The first line creates a goroutine, but the second line waits until that goroutine finishes its work.
If you want to process files in parallel, then you can use a worker pool pattern.
jobs:=make(chan string)
done:=make(chan struct{})
for i:=0;i<nWorkers;i++ {
go workerFunc(jobs,done)
}
The jobs channel will be used to send newly discovered files to workers. When you discover a new file, you can simply:
jobs <- fileName
and the worker should process the file, and it should go back to reading from the channel. So it should look like:
func worker(ch chan string,done chan struct{}) {
defer func() {
done<-struct{}{} // Notify that this goroutine is completed
}()
for inputFile:=range ch {
// process inputFile
}
}
When everything is done, you can terminate the program by waiting for all the goroutines to complete:
close(jobs)
for i:=0;i<nWorkers;i++ {
<-done
}
I have a goroutine inside a loop and the way I am handling the error is that I add it to a channel and after all the goroutines are finished, I check if there was an error and I return accordingly.
The issue with this is that I want to return an error as soon as I get it so that I don't spend time waiting for all the goroutines to finish as it would be inefficient.
I tried adding the select statement but it doesn't work and I can't add the select statement inside the goroutines since I want to exit the for loop and the try function too.
How can I do this?
Here is the code:
package main
import (
"sync"
"runtime"
"fmt"
"errors"
)
// try (from the question): fans out one goroutine per element of s and
// collects the first error via channel ec, checked only after wg.Wait().
//
// NOTE(review): ec is unbuffered and only drained by the select after
// wg.Wait(); because wg.Done is deferred inside the worker, a worker can
// release the WaitGroup and then block forever on `ec <- err` — whether the
// final select observes the error is timing-dependent, and the sender
// goroutine may leak.
func try() (bool, error) {
wg := new(sync.WaitGroup)
s := []int{0,1,2,3,4,5}
ec := make(chan error)
for i, val := range s {
/*
select {
case err, ok := <-ec:
if ok {
println("error 1", err.Error())
return false, err
}
default:
}
*/
wg.Add(1)
i := i
val := val
go func() {
err := func(i int, val int, wg *sync.WaitGroup) error {
defer wg.Done()
if i == 3 {
return errors.New("one error")
} else {
return nil
}
}(i, val, wg)
if err != nil {
ec <- err
return
}
}()
}
wg.Wait()
// Non-blocking check: only sees the error if the sender got scheduled first.
select {
case err, ok := <-ec:
if ok {
println("error 2", err.Error())
return false, err
}
default:
}
return true, nil
}
// main pins GOMAXPROCS to the CPU count (redundant since Go 1.5, where this
// became the default) and prints try()'s error, if any.
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
b, e := try()
if e != nil {
fmt.Println(e.Error(), b)
}
}
This is the go playground link
With wg.Wait() before your select statement, you are effectively waiting for all goroutines to return.
The issue with this is that I want to return an error as soon as I get it
I assume that with this you mean stopping running goroutines as soon as any one of them returns an error.
In this case, you could use context.Context to manage cancellation, but even better is an errgroup.Group, which nicely combines context functionality and synchronization:
Package errgroup provides synchronization, error propagation, and Context cancelation for groups of goroutines working on subtasks of a common task.
In particular Group.Go:
The first call to return a non-nil error cancels the group; its error will be returned by Wait.
import (
"sync"
"runtime"
"fmt"
"errors"
"golang.org/x/sync/errgroup"
)
// try runs one goroutine per element of s via an errgroup and reports the
// first error any of them returned; the group cancels as soon as one fails.
//
// Bug fix: the original checked errg.Wait() but then fell through to
// `return true, nil`, silently discarding the error; the error is now
// propagated to the caller.
func try() (bool, error) {
	errg := new(errgroup.Group)
	s := []int{0, 1, 2, 3, 4, 5}
	for i, val := range s {
		i := i     // shadow: loop variables are shared before Go 1.22
		val := val
		errg.Go(func() error {
			return func(i int, val int) error {
				if i == 3 {
					return errors.New("one error")
				}
				return nil
			}(i, val)
		})
	}
	// Wait returns the first non-nil error from the group, if any.
	if err := errg.Wait(); err != nil {
		return false, err
	}
	return true, nil
}
https://play.golang.org/p/lSIIFJqXf0W
I have found tomb to be useful for this. Below is a stripped-down non-working example that shows the gist, without handling things like variable encapsulation in the loop. It should give you the idea, but I'm happy to clarify on any points.
package main
import (
"fmt"
"gopkg.in/tomb.v2"
"sync"
)
// Stripped-down, non-working sketch (as the author says above): shows the
// tomb pattern — ts.Go per task, ts.Dying() for cooperative cancellation,
// ts.Wait() for the first error.
// NOTE(review): i and v are declared but unused and waitingForWork is
// undefined, so this does not compile as-is; treat it as pseudo-code.
func main() {
ts := tomb.Tomb{}
s := []int{0,1,2,3,4,5}
for i, v := range s {
ts.Go(func() error {
// do some work here or return an error, make sure to watch the dying chan, if it closes,
//then one of the other go-routines failed.
select {
case <- ts.Dying():
return nil
case err := <- waitingForWork():
if err != nil {
return err
}
return nil
}
})
}
// If an error appears here, one of the go-routines must have failed
err := ts.Wait()
if err != nil {
fmt.Println(err)
}
}
I'm trying to prevent a program from opening another instance if it's already open, to do this I create a file with .lock extension and remove it when exiting the program. However everything except the remove works.
package main
import (
"os"
"os/signal"
"fmt"
)
// main (from the question): creates ms.lock as a single-instance guard and
// installs a signal-handling goroutine to remove it — but then falls off
// the end of main, so the process exits before the goroutine can ever
// observe a signal. That is why the interrupt is "never caught".
// NOTE(review): the unconditional "Error removing file" print at the top of
// the goroutine looks like leftover debugging.
func main() {
var test string
exitsig := make(chan os.Signal, 1)
signal.Notify(exitsig, os.Interrupt)
var (
lockstate bool = false
)
if _, err := os.Stat("ms.lock"); err == nil {
// Lock file already present: another instance is running, bail out.
return
} else if os.IsNotExist(err) {
var file, err = os.Create("ms.lock")
if err != nil {
return
}
file.Close()
lockstate = true
}
go func() {
<- exitsig
fmt.Println("Error removing file")
fmt.Scanf("%s", &test)
if lockstate {
var err = os.Remove("ms.lock")
if err != nil {
fmt.Println("Error removing file")
fmt.Scanf("%s", &test)
}
}
os.Exit(0)
}()
}
I've tried exiting by ctrl+c, exiting by pressing the close button on the top right corner of the window but it never sends a signal, the os.Interrupt signal is never caught. What is the reason for this?
Also, I need the signal to be non-platform specific, so it should work on both windows and unix based systems.
I think it's because your main function exits soon after launching the goroutine. If the main function ends, all running goroutines will die too.
Here's code that works for me:
package main
import (
"fmt"
"os"
"os/signal"
"sync"
)
// main (working version): same lock-file logic as the question, but the
// WaitGroup with Add(1) and no matching Done deliberately parks main
// forever, keeping the signal-handling goroutine alive until the process is
// killed. (An empty `select {}` is the more conventional way to block
// forever.)
func main() {
exitsig := make(chan os.Signal, 1)
signal.Notify(exitsig, os.Interrupt)
var (
lockstate bool = false
)
if _, err := os.Stat("ms.lock"); err == nil {
// Lock file already present: another instance is running, bail out.
return
} else if os.IsNotExist(err) {
var file, err = os.Create("ms.lock")
if err != nil {
return
}
file.Close()
lockstate = true
}
go func() {
<-exitsig
if lockstate {
var err = os.Remove("ms.lock")
if err != nil {
fmt.Println("Error removing file: ", err)
}
}
os.Exit(0)
}()
// Block main forever; only the goroutine above ever exits the process.
wg := &sync.WaitGroup{}
wg.Add(1)
wg.Wait()
}
I added the waitgroup to wait in the main thread. Works fine on MacOS - creates ms.lock file and waits. Killing it with Cmd + C removes the file.
Should work anywhere as long as the signal fires.