blocks-mined.go
package main

import (
	"encoding/csv"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/rawdb"
	"github.com/dominant-strategies/go-quai/ethdb"
	"github.com/dominant-strategies/go-quai/ethdb/leveldb"
)
// Block-height boundaries of the two epochs analyzed by this tool.
const (
	epoch1Start = 1
	epoch1End   = 600000
	epoch2Start = 600001
	epoch2End   = 1200000
)
// MinerCount records how many blocks a miner address produced in one zone during one epoch.
type MinerCount struct {
	Address    string
	Zone       string
	Count      int
	EpochStart uint64
	EpochEnd   uint64
}

// BlockStats tracks per-epoch processing counters for a single zone.
type BlockStats struct {
	TotalChecked uint64
	EmptyHashes  uint64
	NilBlocks    uint64
	Successful   uint64
	OutOfRange   uint64
}

// GlobalStats aggregates counters across all zones and epochs.
type GlobalStats struct {
	TotalChecked   uint64
	TotalErrors    uint64
	TotalSuccesses uint64
	OutOfRange     uint64
}
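// The Add* helpers below use atomic operations because every zone spawns two
// goroutines (one per epoch) that increment the same GlobalStats value concurrently.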
func (gs *GlobalStats) AddChecked(n uint64) {
	atomic.AddUint64(&gs.TotalChecked, n)
}

func (gs *GlobalStats) AddErrors(n uint64) {
	atomic.AddUint64(&gs.TotalErrors, n)
}

func (gs *GlobalStats) AddSuccesses(n uint64) {
	atomic.AddUint64(&gs.TotalSuccesses, n)
}

func (gs *GlobalStats) AddOutOfRange(n uint64) {
	atomic.AddUint64(&gs.OutOfRange, n)
}
// DatabaseReader wraps leveldb.Database to implement ethdb.Reader.
type DatabaseReader struct {
	db *leveldb.Database
}

// Has reports whether a key is present in the key-value data store.
func (dr *DatabaseReader) Has(key []byte) (bool, error) {
	return dr.db.Has(key)
}

// Get retrieves the given key if it's present in the key-value data store.
func (dr *DatabaseReader) Get(key []byte) ([]byte, error) {
	return dr.db.Get(key)
}

// Ancient returns an error because ancient (freezer) data is not supported.
func (dr *DatabaseReader) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, fmt.Errorf("ancient not supported")
}

// Ancients returns the length of the ancient data, which is always zero here.
func (dr *DatabaseReader) Ancients() (uint64, error) {
	return 0, nil
}

// AncientRange returns an error because ancient data is not supported.
func (dr *DatabaseReader) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
	return nil, fmt.Errorf("ancient range not supported")
}

// HasAncient reports whether ancient data exists; it never does here.
func (dr *DatabaseReader) HasAncient(kind string, number uint64) (bool, error) {
	return false, nil
}

// AncientSize returns the size of the ancient data, which is always zero here.
func (dr *DatabaseReader) AncientSize(kind string) (uint64, error) {
	return 0, nil
}

// Tail returns the oldest available ancient number; with no freezer it is always zero.
func (dr *DatabaseReader) Tail() (uint64, error) {
	return 0, nil
}
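// openDatabase opens the LevelDB chaindata at path, logs a small sample of keys as a
// sanity check, and wraps the handle in a DatabaseReader so it satisfies ethdb.Reader.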
func openDatabase(path string) (ethdb.Reader, error) {
	log.Printf("Attempting to open database at: %s", path)

	db, err := leveldb.New(path, 0, 0, "", false)
	if err != nil {
		log.Printf("Error opening database: %v", err)
		return nil, fmt.Errorf("failed to open database: %v", err)
	}
	log.Printf("Successfully opened database at: %s", path)

	// Try to read some keys to understand the database structure.
	iter := db.NewIterator(nil, nil)
	defer iter.Release()
	count := 0
	log.Printf("Sampling first 10 keys in database:")
	for iter.Next() && count < 10 {
		key := iter.Key()
		log.Printf("Found key: %x (length: %d)", key, len(key))
		count++
	}
	if err := iter.Error(); err != nil {
		log.Printf("Error iterating database: %v", err)
	}

	return &DatabaseReader{db: db}, nil
}
// Close releases the underlying LevelDB handle.
func (dr *DatabaseReader) Close() error {
	return dr.db.Close()
}
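// findZones walks rootDir and returns every <zone>/quai/chaindata directory that
// contains .ldb files, skipping the prime and region chain directories.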
func findZones(rootDir string) ([]string, error) {
	var zones []string

	// List all zone directories in the root backup directory.
	entries, err := os.ReadDir(rootDir)
	if err != nil {
		return nil, err
	}

	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}

		// Skip prime and region directories.
		if strings.HasPrefix(entry.Name(), "prime") || strings.HasPrefix(entry.Name(), "region") {
			log.Printf("Skipping non-zone directory: %s", entry.Name())
			continue
		}

		// For each zone directory, look for quai/chaindata.
		zonePath := filepath.Join(rootDir, entry.Name(), "quai", "chaindata")
		if _, err := os.Stat(zonePath); err == nil {
			// Check whether the directory contains .ldb files.
			files, err := os.ReadDir(zonePath)
			if err != nil {
				continue
			}
			hasLDB := false
			for _, file := range files {
				if filepath.Ext(file.Name()) == ".ldb" {
					hasLDB = true
					break
				}
			}
			if hasLDB {
				zones = append(zones, zonePath)
				log.Printf("Found zone chaindata: %s (Zone: %s)", zonePath, entry.Name())
			}
		}
	}

	if len(zones) == 0 {
		log.Printf("Warning: No zone directories found (skipped prime and region directories)")
	}
	return zones, nil
}
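// processZoneEpoch walks one zone's canonical chain from block start to end, reads
// each block from the database, and tallies blocks per coinbase (miner) address.
// Per-miner totals for the epoch are sent on the results channel once the scan completes.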
func processZoneEpoch(db ethdb.Reader, zoneName string, start, end uint64, results chan<- MinerCount, globalStats *GlobalStats) error {
	minerCounts := make(map[string]int)
	stats := BlockStats{}

	log.Printf("[%s] Processing epoch from block %d to %d", zoneName, start, end)

	for i := start; i <= end; i++ {
		atomic.AddUint64(&stats.TotalChecked, 1)
		globalStats.AddChecked(1)

		hash := rawdb.ReadCanonicalHash(db, i)
		if hash == (common.Hash{}) {
			atomic.AddUint64(&stats.EmptyHashes, 1)
			globalStats.AddErrors(1)
			log.Printf("[%s] Block %d: Empty hash encountered", zoneName, i)
			continue
		}

		block := rawdb.ReadBlock(db, hash, i)
		if block == nil {
			atomic.AddUint64(&stats.NilBlocks, 1)
			globalStats.AddErrors(1)
			if i%10000 == 0 {
				log.Printf("[%s] Block %d: Nil block encountered", zoneName, i)
			}
			continue
		}

		// Verify the block number is in the expected range. The skip below is commented
		// out, so out-of-range blocks are logged (only when i is a multiple of 10000)
		// but still tallied as successes.
		blockNum := block.Number().Uint64()
		if blockNum < start || blockNum > end {
			atomic.AddUint64(&stats.OutOfRange, 1)
			globalStats.AddOutOfRange(1)
			if i%10000 == 0 {
				coinbase := block.Coinbase().Hex()
				log.Printf("[%s] Block %d Details:", zoneName, i)
				log.Printf("  Hash: %s", hash.Hex())
				log.Printf("  Miner: %s", coinbase)
				log.Printf("  Block Number: %v", blockNum)
				log.Printf("  Timestamp: %v", block.Time())
				log.Printf("  Parent Hash: %v", block.ParentHash().Hex())
				log.Printf("  Transaction Count: %d", len(block.Transactions()))
				log.Printf("[%s] Block %d outside epoch range %d-%d i:%d", zoneName, blockNum, start, end, i)
			}
			// continue
		}

		atomic.AddUint64(&stats.Successful, 1)
		globalStats.AddSuccesses(1)

		coinbase := block.Coinbase().Hex()
		minerCounts[coinbase]++
	}

	// Print final statistics for this epoch.
	log.Printf("\n[%s] Epoch %d-%d Processing Complete:", zoneName, start, end)
	log.Printf("  Total Blocks Checked: %d", atomic.LoadUint64(&stats.TotalChecked))
	log.Printf("  Successfully Processed: %d", atomic.LoadUint64(&stats.Successful))
	log.Printf("  Empty Hash Errors: %d", atomic.LoadUint64(&stats.EmptyHashes))
	log.Printf("  Nil Block Errors: %d", atomic.LoadUint64(&stats.NilBlocks))
	log.Printf("  Out of Range Blocks: %d", atomic.LoadUint64(&stats.OutOfRange))

	// Send results with epoch information.
	for addr, count := range minerCounts {
		results <- MinerCount{
			Address:    addr,
			Zone:       zoneName,
			Count:      count,
			EpochStart: start,
			EpochEnd:   end,
		}
	}

	return nil
}
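// writeResults sorts the collected miner counts by epoch start and then by descending
// block count, and writes them to outputFile as CSV.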
func writeResults(results []MinerCount, outputFile string) error {
	// Sort results by epoch, then by count (highest first).
	sort.Slice(results, func(i, j int) bool {
		if results[i].EpochStart != results[j].EpochStart {
			return results[i].EpochStart < results[j].EpochStart
		}
		return results[i].Count > results[j].Count
	})

	file, err := os.Create(outputFile)
	if err != nil {
		return fmt.Errorf("failed to create output file: %v", err)
	}
	defer file.Close()

	writer := csv.NewWriter(file)
	defer writer.Flush()

	// Write header.
	if err := writer.Write([]string{"Zone", "Miner Address", "Blocks Mined", "Epoch Range"}); err != nil {
		return fmt.Errorf("failed to write CSV header: %v", err)
	}

	// Write data rows.
	for _, result := range results {
		epochRange := fmt.Sprintf("%d-%d", result.EpochStart, result.EpochEnd)
		if err := writer.Write([]string{
			result.Zone,
			result.Address,
			fmt.Sprintf("%d", result.Count),
			epochRange,
		}); err != nil {
			return fmt.Errorf("failed to write CSV row: %v", err)
		}
	}

	return nil
}
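// Example invocation (paths are illustrative):
//
//	go build -o blocks-mined blocks-mined.go
//	./blocks-mined /path/to/node-backup
//
// Output: epoch1_miners.csv and epoch2_miners.csv in the current working directory.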
func main() {
	if len(os.Args) != 2 {
		log.Fatal("Usage: ./blocks-mined <root-directory>")
	}
	rootDir := os.Args[1]

	// Find all zone chaindata directories.
	zones, err := findZones(rootDir)
	if err != nil {
		log.Fatalf("Error finding zones: %v", err)
	}
	log.Printf("Found %d zones", len(zones))
	for _, zone := range zones {
		log.Printf("Zone found: %s", zone)
	}

	// Channels for collecting results from all goroutines, one per epoch.
	resultsEpoch1 := make(chan MinerCount, 1000)
	resultsEpoch2 := make(chan MinerCount, 1000)

	var wg sync.WaitGroup
	globalStats := &GlobalStats{}

	// Process each zone.
	for _, zonePath := range zones {
		// Extract the zone name from the path: the directory two levels above
		// 'chaindata' (e.g. zone-0-0, zone-1-1).
		zoneName := filepath.Base(filepath.Dir(filepath.Dir(zonePath)))
		log.Printf("Processing zone: %s at path: %s", zoneName, zonePath)

		// Open the database for this zone.
		db, err := openDatabase(zonePath)
		if err != nil {
			log.Printf("Error opening database for zone %s: %v", zoneName, err)
			continue
		}

		wg.Add(2)

		// Process epoch 1.
		go func(db ethdb.Reader, zone string) {
			defer wg.Done()
			log.Printf("[%s] Starting Epoch 1 (%d-%d)", zone, epoch1Start, epoch1End)
			if err := processZoneEpoch(db, zone, epoch1Start, epoch1End, resultsEpoch1, globalStats); err != nil {
				log.Printf("Error processing epoch 1 for zone %s: %v", zone, err)
			}
		}(db, zoneName)

		// Process epoch 2.
		go func(db ethdb.Reader, zone string) {
			defer wg.Done()
			log.Printf("[%s] Starting Epoch 2 (%d-%d)", zone, epoch2Start, epoch2End)
			if err := processZoneEpoch(db, zone, epoch2Start, epoch2End, resultsEpoch2, globalStats); err != nil {
				log.Printf("Error processing epoch 2 for zone %s: %v", zone, err)
			}
		}(db, zoneName)

		// Defer closing until main returns, after all goroutines have finished.
		if closer, ok := db.(interface{ Close() error }); ok {
			defer closer.Close()
		}
	}

	// Close the result channels once all processing goroutines have finished.
	go func() {
		wg.Wait()
		close(resultsEpoch1)
		close(resultsEpoch2)
	}()

	// Collect results.
	var epoch1Results []MinerCount
	var epoch2Results []MinerCount
	log.Println("Collecting results from all zones...")

	// Collect epoch 1 results.
	for result := range resultsEpoch1 {
		epoch1Results = append(epoch1Results, result)
	}
	log.Printf("Collected %d miner records for Epoch 1 (%d-%d)",
		len(epoch1Results), epoch1Start, epoch1End)

	// Collect epoch 2 results.
	for result := range resultsEpoch2 {
		epoch2Results = append(epoch2Results, result)
	}
	log.Printf("Collected %d miner records for Epoch 2 (%d-%d)",
		len(epoch2Results), epoch2Start, epoch2End)

	// Print final global statistics.
	log.Printf("\nFINAL GLOBAL STATISTICS:")
	log.Printf("Total Blocks Checked: %d", atomic.LoadUint64(&globalStats.TotalChecked))
	log.Printf("Successfully Processed: %d", atomic.LoadUint64(&globalStats.TotalSuccesses))
	log.Printf("Total Errors: %d", atomic.LoadUint64(&globalStats.TotalErrors))
	log.Printf("Out of Range Blocks: %d", atomic.LoadUint64(&globalStats.OutOfRange))
	if checked := atomic.LoadUint64(&globalStats.TotalChecked); checked > 0 {
		successRate := float64(atomic.LoadUint64(&globalStats.TotalSuccesses)) / float64(checked) * 100
		log.Printf("Overall Success Rate: %.2f%%", successRate)
	}

	// Write results to files.
	log.Println("Writing results to CSV files...")
	if err := writeResults(epoch1Results, "epoch1_miners.csv"); err != nil {
		log.Printf("Error writing epoch 1 results: %v", err)
	} else {
		log.Printf("Successfully wrote epoch1_miners.csv with %d records", len(epoch1Results))
	}
	if err := writeResults(epoch2Results, "epoch2_miners.csv"); err != nil {
		log.Printf("Error writing epoch 2 results: %v", err)
	} else {
		log.Printf("Successfully wrote epoch2_miners.csv with %d records", len(epoch2Results))
	}

	log.Println("Analysis complete!")
}
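// The two CSV files share this layout (the data row is illustrative, not real output):
//
//	Zone,Miner Address,Blocks Mined,Epoch Range
//	zone-0-0,0x0000000000000000000000000000000000000000,12345,1-600000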