This repository was archived by the owner on Dec 5, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 9
Expand file tree
/
Copy pathexample_test.go
More file actions
69 lines (60 loc) · 1.27 KB
/
example_test.go
File metadata and controls
69 lines (60 loc) · 1.27 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
package jobqueue_test
import (
"context"
"fmt"
"time"
"github.com/olivere/jobqueue"
)
func ExampleManager() {
	// Set up a manager with 10 concurrent workers for rank 0 and 2 for rank 1.
	m := jobqueue.New(
		jobqueue.SetConcurrency(0, 10),
		jobqueue.SetConcurrency(1, 2),
	)

	// Register a processor for the "crawl" topic. The processor signals
	// completion on the done channel so the example can wait for the job.
	done := make(chan struct{}, 1)
	if err := m.Register("crawl", func(job *jobqueue.Job) error {
		url, _ := job.Args[0].(string)
		fmt.Printf("Crawl %s\n", url)
		done <- struct{}{}
		return nil
	}); err != nil {
		fmt.Println("Register failed")
		return
	}

	// Start background processing.
	if err := m.Start(); err != nil {
		fmt.Println("Start failed")
		return
	}
	fmt.Println("Started")

	// Enqueue a crawler job.
	job := &jobqueue.Job{Topic: "crawl", Args: []interface{}{"https://alt-f4.de"}}
	if err := m.Add(context.Background(), job); err != nil {
		fmt.Println("Add failed")
		return
	}
	fmt.Println("Job added")

	// Block until the job has been processed, or give up after 5 seconds.
	select {
	case <-done:
	case <-time.After(5 * time.Second):
		fmt.Println("Job timed out")
		return
	}

	// Shut the manager down.
	if err := m.Stop(); err != nil {
		fmt.Println("Stop failed")
		return
	}
	fmt.Println("Stopped")

	// Output:
	// Started
	// Job added
	// Crawl https://alt-f4.de
	// Stopped
}