forked from wal-g/wal-g
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: structs.go
460 lines (394 loc) · 11.7 KB
/
structs.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
package walg
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
"github.com/pkg/errors"
)
// EXCLUDE is the set of file and directory names that are omitted from
// a bundled backup. Keys are base names; the zero-size Empty values make
// the map act as a set.
//
// A composite literal replaces the former init() function: the full set
// is visible in one place and there is no reliance on package-init side
// effects.
var EXCLUDE = map[string]Empty{
	// Files
	"pg_log":                   {},
	"pg_xlog":                  {},
	"pg_wal":                   {},
	"pgsql_tmp":                {},
	"postgresql.auto.conf.tmp": {},
	"postmaster.pid":           {},
	"postmaster.opts":          {},
	"recovery.conf":            {},
	// Directories
	"pg_dynshmem":  {},
	"pg_notify":    {},
	"pg_replslot":  {},
	"pg_serial":    {},
	"pg_stat_tmp":  {},
	"pg_snapshots": {},
	"pg_subtrans":  {},
}

// Empty is a zero-size placeholder value: used for channel signaling and
// as the value type of set-like maps such as EXCLUDE.
type Empty struct{}
// NilWriter discards everything written to it, like /dev/null.
type NilWriter struct{}

// Write reports the entire slice as written and never fails.
func (nw *NilWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	return
}
// TarBundle represents one completed directory.
//
// It is the queue-management contract implemented by Bundle: walker
// goroutines dequeue tarballs, fill them, and enqueue them back or hand
// them off for upload once they reach the minimum size.
type TarBundle interface {
	// NewTarBall makes a fresh tarball the bundle's current one.
	NewTarBall(dedicatedUploader bool)
	// GetIncrementBaseLsn returns the base LSN for a delta backup,
	// or nil when this is a full backup.
	GetIncrementBaseLsn() *uint64
	// GetIncrementBaseFiles returns the file list of the base backup.
	GetIncrementBaseFiles() BackupFileList
	// StartQueue allocates the tarball/upload queues and primes the
	// tarball queue with fresh tarballs.
	StartQueue()
	// Deque blocks until an idle tarball is available.
	Deque() TarBall
	// EnqueueBack returns tb to the queue unless a parallel operation
	// has taken responsibility for it.
	EnqueueBack(tb TarBall, parallelOpInProgress *bool)
	// CheckSizeAndEnqueueBack ships tb for upload when large enough,
	// then requeues it (or a fresh replacement).
	CheckSizeAndEnqueueBack(tb TarBall) error
	// FinishQueue drains the queues and waits for uploads to finish.
	FinishQueue() error
	// GetFiles returns the per-file bookkeeping map.
	GetFiles() *sync.Map
}
// A Bundle represents the directory to
// be walked. Contains at least one TarBall
// if walk has started. Each TarBall will be at least
// MinSize bytes. The Sentinel is used to ensure complete
// uploaded backups; in this case, pg_control is used as
// the sentinel.
type Bundle struct {
	MinSize            int64          // minimum bytes in a tarball before it is shipped for upload
	Sen                *Sentinel      // completion marker (pg_control)
	Tb                 TarBall        // current tarball being filled
	Tbm                TarBallMaker   // factory that produces new tarballs
	Crypter            OpenPGPCrypter // encrypts tarball contents when configured
	Timeline           uint32
	Replica            bool
	IncrementFromLsn   *uint64        // base LSN when making a delta backup, nil otherwise
	IncrementFromFiles BackupFileList // file list of the base backup for deltas

	tarballQueue     chan TarBall // idle tarballs ready to be filled
	uploadQueue      chan TarBall // closed tarballs whose uploads are in flight
	parallelTarballs int          // number of concurrently filled tarballs
	maxUploadQueue   int          // cap on closed tarballs queued for upload
	mutex            sync.Mutex   // guards the close-and-swap in CheckSizeAndEnqueueBack
	started          bool         // set by StartQueue, cleared by FinishQueue
	Files            *sync.Map
}
// GetFiles exposes the bundle's per-file bookkeeping map.
func (b *Bundle) GetFiles() *sync.Map {
	return b.Files
}
// StartQueue allocates the tarball and upload queues and primes the
// tarball queue with one fresh tarball per parallel worker. It panics
// when the queue is already running.
func (b *Bundle) StartQueue() {
	if b.started {
		panic("Trying to start already started Queue")
	}
	b.parallelTarballs = getMaxUploadDiskConcurrency()
	b.maxUploadQueue = getMaxUploadQueue()
	b.tarballQueue = make(chan TarBall, b.parallelTarballs)
	b.uploadQueue = make(chan TarBall, b.parallelTarballs+b.maxUploadQueue)
	// One ready-to-fill tarball per worker slot.
	for worker := 0; worker < b.parallelTarballs; worker++ {
		b.NewTarBall(true)
		b.tarballQueue <- b.Tb
	}
	b.started = true
}
// Deque blocks until an idle tarball can be taken from the queue.
// It panics when the queue has not been started.
func (b *Bundle) Deque() TarBall {
	if !b.started {
		panic("Trying to deque from not started Queue")
	}
	return <-b.tarballQueue
}
// FinishQueue stops the queue: it drains the upload queue waiting for
// every in-flight upload, then reclaims exactly parallelTarballs workers
// from the tarball queue, closing any tarball that was actually written
// to. Returns the first close error encountered.
//
// NOTE(review): the drain loop below spins (select with an empty
// default) instead of blocking on the channel; presumably tolerable
// because the queue is short-lived at shutdown — confirm under load.
func (b *Bundle) FinishQueue() error {
	if !b.started {
		panic("Trying to stop not started Queue")
	}
	b.started = false
	// At this point no new tarballs should be put into uploadQueue
	for len(b.uploadQueue) > 0 {
		select {
		case otb := <-b.uploadQueue:
			otb.AwaitUploads()
		default:
		}
	}
	// We have to deque exactly this count of workers
	for i := 0; i < b.parallelTarballs; i++ {
		tb := <-b.tarballQueue
		if tb.Tw() == nil {
			// This had written nothing
			continue
		}
		err := tb.CloseTar()
		if err != nil {
			return errors.Wrap(err, "TarWalker: failed to close tarball")
		}
		tb.AwaitUploads()
	}
	return nil
}
// EnqueueBack returns tb to the tarball queue, unless a parallel
// operation is still working on it (in which case that operation is
// responsible for requeuing).
func (b *Bundle) EnqueueBack(tb TarBall, parallelOpInProgress *bool) {
	if *parallelOpInProgress {
		return
	}
	b.tarballQueue <- tb
}
// CheckSizeAndEnqueueBack puts tb back on the tarball queue. If tb has
// grown past MinSize it is first closed and handed to the upload queue
// (waiting out any uploads beyond maxUploadQueue), and a fresh tarball
// is enqueued in its place.
func (b *Bundle) CheckSizeAndEnqueueBack(tb TarBall) error {
	if tb.Size() <= b.MinSize {
		// Still small enough: keep filling it.
		b.tarballQueue <- tb
		return nil
	}

	b.mutex.Lock()
	defer b.mutex.Unlock()

	if err := tb.CloseTar(); err != nil {
		return errors.Wrap(err, "TarWalker: failed to close tarball")
	}
	b.uploadQueue <- tb
	// Throttle: wait for older uploads while the queue is over its cap.
	for len(b.uploadQueue) > b.maxUploadQueue {
		select {
		case queued := <-b.uploadQueue:
			queued.AwaitUploads()
		default:
		}
	}
	b.NewTarBall(true)
	b.tarballQueue <- b.Tb
	return nil
}
// NewTarBall asks the bundle's TarBallMaker for a fresh tarball and
// installs it as the current one (b.Tb).
func (b *Bundle) NewTarBall(dedicatedUploader bool) {
	b.Tb = b.Tbm.Make(dedicatedUploader)
}
// GetIncrementBaseLsn returns the LSN of the previous backup, or nil
// when this bundle is not a delta backup.
func (b *Bundle) GetIncrementBaseLsn() *uint64 {
	return b.IncrementFromLsn
}
// GetIncrementBaseFiles returns the file list of the backup this delta
// is based on.
func (b *Bundle) GetIncrementBaseFiles() BackupFileList {
	return b.IncrementFromFiles
}
// Sentinel is used to signal completion of a walked
// directory. It records the sentinel file (pg_control, per the Bundle
// documentation) whose presence marks the backup as complete.
type Sentinel struct {
	Info os.FileInfo // metadata of the sentinel file
	path string      // filesystem path to the sentinel file
}
// A TarBall represents one tar file.
type TarBall interface {
	// SetUp prepares the tar writer and starts the upload; an optional
	// first arg overrides the default part name.
	SetUp(crypter Crypter, args ...string)
	// CloseTar flushes and closes the tar stream and its underlying writer.
	CloseTar() error
	// Finish uploads the backup's json sentinel once all parts are up.
	Finish(sentinel *S3TarBallSentinelDto) error
	BaseDir() string // base directory of the backup
	Trim() string    // path-trim string applied to members
	Nop() bool       // true for dummy tarballs used in tests
	Number() int     // part number within the backup
	Size() int64     // accumulated payload size in bytes
	AddSize(int64)   // add to the accumulated size
	Tw() *tar.Writer // tar writer; nil until SetUp has run
	// AwaitUploads blocks until pending uploads complete.
	AwaitUploads()
}
// BackupFileList is a map of file properties in a backup, keyed by the
// file's name as recorded while walking the directory.
type BackupFileList map[string]BackupFileDescription
// S3TarBall represents a tar file that is
// going to be uploaded to S3.
type S3TarBall struct {
	baseDir          string         // base directory of the backup (see BaseDir)
	trim             string         // path-trim string (see Trim)
	bkupName         string         // backup name; used to derive the sentinel object key
	nop              bool           // dummy tarball for tests (see Nop)
	number           int            // part number within the backup
	size             int64          // accumulated payload size in bytes (see AddSize)
	w                io.WriteCloser // underlying upload writer, set by SetUp
	tw               *tar.Writer    // tar writer layered on w; nil until SetUp
	tu               *TarUploader   // uploader tracking this tarball's S3 uploads
	Lsn              *uint64        // LSN associated with this backup (set by caller)
	IncrementFromLsn *uint64        // base LSN for delta backups, nil otherwise
	IncrementFrom    string         // name of the base backup for deltas
	Files            BackupFileList // per-file descriptions recorded while archiving
}
// SetUp creates a new tar writer and starts upload to S3.
// Upload will block until the tar file is finished writing.
// If a name for the file is not given, a default of the form
// `part_....tar.lz4` is used. SetUp is a no-op on a tarball that has
// already been set up.
func (s *S3TarBall) SetUp(crypter Crypter, names ...string) {
	if s.tw != nil {
		return
	}
	name := "part_" + fmt.Sprintf("%0.3d", s.number) + ".tar.lz4"
	if len(names) > 0 {
		name = names[0]
	}
	s.w = s.StartUpload(name, crypter)
	s.tw = tar.NewWriter(s.w)
}
// CloseTar closes the tar writer, flushing any unwritten data to the
// underlying upload writer, then closes that writer too so the upload
// can complete.
func (s *S3TarBall) CloseTar() error {
	if err := s.tw.Close(); err != nil {
		return errors.Wrap(err, "CloseTar: failed to close tar writer")
	}
	if err := s.w.Close(); err != nil {
		return errors.Wrap(err, "CloseTar: failed to close underlying writer")
	}
	fmt.Printf("Finished writing part %d.\n", s.number)
	return nil
}
// ErrSentinelNotUploaded happens when upload of json sentinel failed.
// S3TarBall.Finish returns it when the part uploads did not all succeed
// (or no sentinel DTO was supplied), so the backup must not be treated
// as complete.
var ErrSentinelNotUploaded = errors.New("Sentinel was not uploaded due to timeline change during backup")
// AwaitUploads blocks until every upload started through this tarball's
// uploader has finished (its WaitGroup drains).
// Receiver renamed from b to s for consistency with every other
// S3TarBall method.
func (s *S3TarBall) AwaitUploads() {
	s.tu.wg.Wait()
}
// S3TarBallSentinelDto describes file structure of json sentinel
// uploaded alongside the backup. The Delta* fields are either all set
// or all nil (see IsIncremental).
type S3TarBallSentinelDto struct {
	LSN               *uint64
	IncrementFromLSN  *uint64 `json:"DeltaFromLSN,omitempty"`  // base LSN of the delta
	IncrementFrom     *string `json:"DeltaFrom,omitempty"`     // name of the direct base backup
	IncrementFullName *string `json:"DeltaFullName,omitempty"` // name of the full backup at the chain root
	IncrementCount    *int    `json:"DeltaCount,omitempty"`    // position in the delta chain

	Files BackupFileList // per-file properties recorded during the walk (see SetFiles)

	PgVersion int
	FinishLSN *uint64

	UserData interface{} `json:"UserData,omitempty"` // opaque user payload from GetSentinelUserData
}
// SetFiles copies the walked-file map p into the sentinel's Files list,
// converting the untyped sync.Map entries to their concrete types.
func (s *S3TarBallSentinelDto) SetFiles(p *sync.Map) {
	collected := make(BackupFileList)
	p.Range(func(k, v interface{}) bool {
		collected[k.(string)] = v.(BackupFileDescription)
		return true
	})
	s.Files = collected
}
// BackupFileDescription contains properties of one backup file.
type BackupFileDescription struct {
	IsIncremented bool      // should never be both incremented and Skipped
	IsSkipped     bool      // file was skipped during archiving
	MTime         time.Time // modification time recorded at walk time
}
// IsIncremental reports whether the sentinel describes a delta backup.
// A delta must carry the base name plus all of FromLSN/FullName/Count;
// any partial combination is a programming error and panics.
func (dto *S3TarBallSentinelDto) IsIncremental() bool {
	hasBase := dto.IncrementFrom != nil
	hasDetails := dto.IncrementFromLSN != nil &&
		dto.IncrementFullName != nil &&
		dto.IncrementCount != nil
	if hasBase != hasDetails {
		panic("Inconsistent S3TarBallSentinelDto")
	}
	return hasBase
}
// Finish waits for all tar parts to finish uploading, then writes and
// uploads the "<backup>_backup_stop_sentinel.json" description. The
// sentinel is only uploaded when every other part of the backup is
// already present in S3; otherwise ErrSentinelNotUploaded is returned.
//
// Fix: the original declared an outer `var err error` that was shadowed
// by `dtoBody, err :=` inside the if-block, so the final `err == nil`
// check was vacuously true and `return err` always returned nil. The
// dead variable is removed and the control flow flattened.
func (s *S3TarBall) Finish(sentinel *S3TarBallSentinelDto) error {
	name := s.bkupName + "_backup_stop_sentinel.json"
	tupl := s.tu
	tupl.Finish()

	// If any part failed to upload, skip the sentinel so the backup is
	// never mistaken for a complete one.
	if !tupl.Success || sentinel == nil {
		log.Printf("Uploaded %d compressed tar Files.\n", s.number)
		log.Printf("Sentinel was not uploaded %v", name)
		return ErrSentinelNotUploaded
	}

	sentinel.UserData = GetSentinelUserData()
	dtoBody, err := json.Marshal(*sentinel)
	if err != nil {
		return err
	}
	path := tupl.server + "/basebackups_005/" + name
	input := &s3manager.UploadInput{
		Bucket:       aws.String(tupl.bucket),
		Key:          aws.String(path),
		Body:         bytes.NewReader(dtoBody),
		StorageClass: aws.String(tupl.StorageClass),
	}
	if tupl.ServerSideEncryption != "" {
		input.ServerSideEncryption = aws.String(tupl.ServerSideEncryption)
		if tupl.SSEKMSKeyId != "" {
			// Only aws:kms implies sseKmsKeyId, checked during validation
			input.SSEKMSKeyId = aws.String(tupl.SSEKMSKeyId)
		}
	}
	tupl.wg.Add(1)
	go func() {
		defer tupl.wg.Done()
		if e := tupl.upload(input, path); e != nil {
			log.Printf("upload: could not upload '%s'\n", path)
			// NOTE(review): log.Fatalf terminates the whole process from
			// inside a goroutine; kept to preserve existing behavior, but
			// surfacing the error to the caller would be friendlier.
			log.Fatalf("S3TarBall Finish: json failed to upload")
		}
	}()
	// Wait for the sentinel upload itself.
	tupl.Finish()

	if tupl.Success {
		fmt.Printf("Uploaded %d compressed tar Files.\n", s.number)
	}
	return nil
}
// BaseDir returns the base directory of the backup.
func (s *S3TarBall) BaseDir() string {
	return s.baseDir
}
// Trim returns the path-trim string configured for this tarball.
func (s *S3TarBall) Trim() string {
	return s.trim
}
// Nop reports whether this is a dummy tarball used for test purposes.
func (s *S3TarBall) Nop() bool {
	return s.nop
}
// Number reports which part of the backup this tarball is.
func (s *S3TarBall) Number() int {
	return s.number
}
// Size reports the payload bytes accumulated in this tarball so far.
func (s *S3TarBall) Size() int64 {
	return s.size
}
// AddSize grows the tarball's recorded payload size by i bytes.
func (s *S3TarBall) AddSize(i int64) {
	s.size += i
}
// Tw exposes the tar writer; it is nil until SetUp has been called.
func (s *S3TarBall) Tw() *tar.Writer {
	return s.tw
}
// TarUploader contains fields associated with uploading tarballs.
// Multiple tarballs can share one uploader. Must call CreateUploader()
// in 'upload.go'.
type TarUploader struct {
	Upl                  s3manageriface.UploaderAPI // actual S3 uploader; attached by CreateUploader, not NewTarUploader
	ServerSideEncryption string                     // SSE algorithm; empty disables server-side encryption
	SSEKMSKeyId          string                     // KMS key id; only meaningful with aws:kms (see Finish in S3TarBall)
	StorageClass         string                     // S3 storage class; defaults to "STANDARD"
	Success              bool                       // true once uploads completed without error (checked by Finish)
	bucket               string                     // target S3 bucket
	server               string                     // key prefix within the bucket
	region               string
	wg                   *sync.WaitGroup // tracks in-flight uploads; replaced per-clone (see Clone)
}
// NewTarUploader creates a new tar uploader without the actual
// S3 uploader. CreateUploader() is used to configure byte size and
// concurrency streams for the uploader.
//
// NOTE(review): the svc parameter is unused in this constructor;
// presumably the S3 client is attached later by CreateUploader —
// confirm before removing it from the signature.
func NewTarUploader(svc s3iface.S3API, bucket, server, region string) *TarUploader {
	uploader := TarUploader{
		StorageClass: "STANDARD",
		bucket:       bucket,
		server:       server,
		region:       region,
		wg:           &sync.WaitGroup{},
	}
	return &uploader
}
// Finish blocks until every queued part upload has completed, and
// prints an alert to stderr when any of them failed.
func (tu *TarUploader) Finish() {
	tu.wg.Wait()
	if tu.Success {
		return
	}
	log.Printf("WAL-G could not complete upload.\n")
}
// Clone creates a TarUploader that shares this uploader's configuration
// and S3 client but carries a fresh WaitGroup, so the copy tracks its
// own set of in-flight uploads.
//
// Fix: uses a field-named composite literal instead of a positional one,
// so the copy stays correct (and `go vet`-clean) if TarUploader's fields
// are ever added to or reordered.
func (tu *TarUploader) Clone() *TarUploader {
	return &TarUploader{
		Upl:                  tu.Upl,
		ServerSideEncryption: tu.ServerSideEncryption,
		SSEKMSKeyId:          tu.SSEKMSKeyId,
		StorageClass:         tu.StorageClass,
		Success:              tu.Success,
		bucket:               tu.bucket,
		server:               tu.server,
		region:               tu.region,
		wg:                   &sync.WaitGroup{},
	}
}