import json
import shutil
from contextlib import ExitStack
from pathlib import Path
from sqlite3 import IntegrityError
from typing import Final

import pytest
from eth_utils.address import to_checksum_address
from freezegun import freeze_time

from rotkehlchen.assets.types import AssetType
from rotkehlchen.chain.ethereum.utils import should_update_protocol_cache
from rotkehlchen.constants.misc import GLOBALDB_NAME, GLOBALDIR_NAME
from rotkehlchen.db.drivers.gevent import DBConnection, DBConnectionType
from rotkehlchen.db.utils import table_exists
from rotkehlchen.errors.misc import DBUpgradeError
from rotkehlchen.globaldb.cache import (
    globaldb_get_general_cache_keys_and_values_like,
    globaldb_get_general_cache_last_queried_ts_by_key,
    globaldb_get_unique_cache_value,
)
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.globaldb.schema import (
    DB_CREATE_ASSET_COLLECTIONS,
    DB_CREATE_LOCATION_ASSET_MAPPINGS,
    DB_CREATE_LOCATION_UNSUPPORTED_ASSETS,
)
from rotkehlchen.globaldb.upgrades.manager import maybe_upgrade_globaldb
from rotkehlchen.globaldb.upgrades.v2_v3 import OTHER_EVM_CHAINS_ASSETS
from rotkehlchen.globaldb.upgrades.v3_v4 import (
    MAKERDAO_ABI_GROUP_1,
    MAKERDAO_ABI_GROUP_2,
    MAKERDAO_ABI_GROUP_3,
    YEARN_ABI_GROUP_1,
    YEARN_ABI_GROUP_2,
    YEARN_ABI_GROUP_3,
    YEARN_ABI_GROUP_4,
)
from rotkehlchen.globaldb.upgrades.v5_v6 import V5_V6_UPGRADE_UNIQUE_CACHE_KEYS
from rotkehlchen.globaldb.utils import GLOBAL_DB_VERSION
from rotkehlchen.tests.fixtures.globaldb import create_globaldb
from rotkehlchen.tests.utils.globaldb import patch_for_globaldb_upgrade_to
from rotkehlchen.types import (
    YEARN_VAULTS_V1_PROTOCOL,
    CacheType,
    ChainID,
    EvmTokenKind,
    Location,
    Timestamp,
)
from rotkehlchen.utils.misc import ts_now

# TODO: Perhaps have a saved version of that global DB for the tests and query it too?
ASSETS_IN_V2_GLOBALDB: Final = 3095
YEARN_V1_ASSETS_IN_V3: Final = 32


def _count_sql_file_sentences(file_name: str, skip_statements: int = 0) -> int:
    """Count the rows inserted by the sql scripts used during upgrades.

    If the skip_statements argument is provided, the first skip_statements
    statements are ignored and the rows of the statement that follows them
    are counted instead.
    """
    skipped_statements = 0
    dir_path = Path(__file__).resolve().parent.parent.parent.parent
    with open(dir_path / 'data' / file_name, encoding='utf8') as f:
        insertions_made = 0
        line = ' '
        while line:
            line = f.readline()
            if skipped_statements < skip_statements:
                if ';' in line:
                    skipped_statements += 1
                continue
            if skipped_statements == skip_statements and 'INSERT' in line:
                insertions_made = 1
                continue
            insertions_made += 1
            if ';' in line:
                break

    return insertions_made - 1
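
# A minimal usage sketch of the counter above, assuming a hypothetical data file
# 'example.sql' holding two INSERT statements with one value row per line and a
# terminating semicolon each:
#
#     _count_sql_file_sentences('example.sql')                     # rows of the 1st statement
#     _count_sql_file_sentences('example.sql', skip_statements=1)  # rows of the 2nd statement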


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('reload_user_assets', [False])
@pytest.mark.parametrize('custom_globaldb', ['v2_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [2])
def test_upgrade_v2_v3(globaldb: GlobalDBHandler):
    """Test globalDB upgrade v2->v3"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        assert globaldb.get_setting_value('version', 0) == 2
        cursor.execute(
            'SELECT COUNT(*) FROM price_history WHERE from_asset=? OR to_asset=?',
            ('BIFI', 'BIFI'),
        )
        assert cursor.fetchone()[0] == 2
        # Check that the expected assets are present
        ids_in_db = {row[0] for row in cursor.execute('SELECT * FROM user_owned_assets')}
        assert ids_in_db == {
            '_ceth_0x4E15361FD6b4BB609Fa63C81A2be19d873717870',
            '_ceth_0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5',
            '_ceth_0x7D1AfA7B718fb893dB30A3aBc0Cfc608AaCfeBB0',
            'BTC',
            'ETH',
            'USD',
            'EUR',
            'BCH',
            'BIFI',
        }

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 3)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 3
    assets_inserted_by_update = _count_sql_file_sentences('globaldb_v2_v3_assets.sql')
    with globaldb.conn.read_ctx() as cursor:
        # Test the number of assets after the migration: the same assets as before,
        # plus the new ones added via the sql file, minus the ones we skip
        actual_assets_num = cursor.execute('SELECT COUNT(*) from assets').fetchone()[0]
        assert actual_assets_num == ASSETS_IN_V2_GLOBALDB + assets_inserted_by_update - len(OTHER_EVM_CHAINS_ASSETS)  # noqa: E501

        # Check that the properties of LUSD (ethereum token) have been correctly translated
        lusd_token_data = cursor.execute('SELECT identifier, token_kind, chain, address, decimals, protocol FROM evm_tokens WHERE address = "0x5f98805A4E8be255a32880FDeC7F6728C6568bA0"').fetchone()  # noqa: E501
        assert lusd_token_data[0] == 'eip155:1/erc20:0x5f98805A4E8be255a32880FDeC7F6728C6568bA0'
        assert EvmTokenKind.deserialize_from_db(lusd_token_data[1]) == EvmTokenKind.ERC20
        assert ChainID.deserialize_from_db(lusd_token_data[2]) == ChainID.ETHEREUM
        assert lusd_token_data[3] == '0x5f98805A4E8be255a32880FDeC7F6728C6568bA0'
        assert lusd_token_data[4] == 18
        assert lusd_token_data[5] is None
        lusd_asset_data = cursor.execute('SELECT symbol, coingecko, cryptocompare, forked, started, swapped_for FROM common_asset_details WHERE identifier = "eip155:1/erc20:0x5f98805A4E8be255a32880FDeC7F6728C6568bA0"').fetchone()  # noqa: E501
        assert lusd_asset_data[0] == 'LUSD'
        assert lusd_asset_data[1] == 'liquity-usd'
        assert lusd_asset_data[2] == 'LUSD'
        assert lusd_asset_data[3] is None
        assert lusd_asset_data[4] == 1617611299
        assert lusd_asset_data[5] is None
        lusd_asset_data = cursor.execute('SELECT name, type FROM assets WHERE identifier = "eip155:1/erc20:0x5f98805A4E8be255a32880FDeC7F6728C6568bA0"').fetchone()  # noqa: E501
        assert lusd_asset_data[0] == 'LUSD Stablecoin'
        assert AssetType.deserialize_from_db(lusd_asset_data[1]) == AssetType.EVM_TOKEN

        # Check that a normal asset also gets correctly mapped
        bch_asset_data = cursor.execute('SELECT symbol, coingecko, cryptocompare, forked, started, swapped_for FROM common_asset_details WHERE identifier = "BCH"').fetchone()  # noqa: E501
        assert bch_asset_data[0] == 'BCH'
        assert bch_asset_data[1] == 'bitcoin-cash'
        assert bch_asset_data[2] is None
        assert bch_asset_data[3] == 'BTC'
        assert bch_asset_data[4] == 1501593374
        assert bch_asset_data[5] is None
        bch_asset_data = cursor.execute('SELECT name, type FROM assets WHERE identifier = "BCH"').fetchone()  # noqa: E501
        assert bch_asset_data[0] == 'Bitcoin Cash'
        assert AssetType.deserialize_from_db(bch_asset_data[1]) == AssetType.OWN_CHAIN

        ids_in_db = {row[0] for row in cursor.execute('SELECT * FROM user_owned_assets')}
        assert ids_in_db == {
            'eip155:1/erc20:0x4E15361FD6b4BB609Fa63C81A2be19d873717870',
            'eip155:1/erc20:0x429881672B9AE42b8EbA0E26cD9C73711b891Ca5',
            'eip155:1/erc20:0x7D1AfA7B718fb893dB30A3aBc0Cfc608AaCfeBB0',
            'BTC',
            'ETH',
            'USD',
            'EUR',
            'BCH',
            'eip155:56/erc20:0xCa3F508B8e4Dd382eE878A314789373D80A5190A',
        }

        # FLO is an asset that is not an evm token but has `swapped_for` pointing to an
        # evm token. Here we check that its `swapped_for` field is updated properly.
        # 1. Check that the FLO asset exists
        flo_swapped_for = cursor.execute(
            'SELECT swapped_for FROM common_asset_details WHERE identifier="FLO"',
        ).fetchone()
        assert flo_swapped_for is not None
        # 2. Check that its `swapped_for` was updated properly
        found_assets = cursor.execute(
            'SELECT COUNT(*) FROM assets WHERE identifier = ?', (flo_swapped_for[0],),
        ).fetchone()[0]
        # should have found one asset that FLO's swapped_for is pointing to
        assert found_assets == 1

        # Check that new evm tokens have been correctly upgraded in price_history. Checking BIFI
        cursor.execute('SELECT price FROM price_history WHERE from_asset == "eip155:56/erc20:0xCa3F508B8e4Dd382eE878A314789373D80A5190A"')  # noqa: E501
        assert cursor.fetchone()[0] == '464.99'
        cursor.execute('SELECT price FROM price_history WHERE to_asset == "eip155:56/erc20:0xCa3F508B8e4Dd382eE878A314789373D80A5190A"')  # noqa: E501
        assert cursor.fetchone()[0] == '0.00215058388'
        cursor.execute(
            'SELECT COUNT(*) FROM price_history WHERE from_asset=? OR to_asset=?',
            ('BIFI', 'BIFI'),
        )
        assert cursor.fetchone()[0] == 0

    assert GlobalDBHandler.get_schema_version() == 3
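
# The assertions above rely on the v2 -> v3 identifier translation: legacy
# `_ceth_<address>` ethereum token ids become CAIP-style `eip155:1/erc20:<address>`
# ids. A minimal illustrative sketch of that mapping (a hypothetical helper, not
# the actual upgrade code, which lives in rotkehlchen/globaldb/upgrades/v2_v3.py):
def _v2_to_v3_identifier_sketch(old_id: str) -> str:
    """Hypothetical helper mirroring the id translation checked above."""
    if old_id.startswith('_ceth_'):  # ethereum tokens move to the eip155 namespace
        return f"eip155:1/erc20:{old_id.removeprefix('_ceth_')}"
    return old_id  # non-evm assets (BTC, ETH, fiat) keep their identifier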


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v3_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [3])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_upgrade_v3_v4(globaldb: GlobalDBHandler):
    """Test the global DB upgrade from v3 to v4"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        assert globaldb.get_setting_value('version', 0) == 3
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name IN (?, ?)',
            ('contract_abi', 'contract_data'),
        )
        assert cursor.fetchone()[0] == 0
        cursor.execute('SELECT COUNT(*) from evm_tokens WHERE protocol="yearn-v1"')
        assert cursor.fetchone()[0] == YEARN_V1_ASSETS_IN_V3

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 4)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 4
    with globaldb.conn.read_ctx() as cursor:
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name IN (?, ?)',
            ('contract_abi', 'contract_data'),
        )
        assert cursor.fetchone()[0] == 2
        # 93 eth contracts + 1 from the dxdao file - 1 for the removed multicall1
        # + 3 new optimism contracts + 1 for the added liquity staking contract
        expected_contracts_length = 93 - 1 + 1 + 3 + 1
        cursor.execute('SELECT COUNT(*) FROM contract_data')
        assert cursor.fetchone()[0] == expected_contracts_length
        groups = [MAKERDAO_ABI_GROUP_1, MAKERDAO_ABI_GROUP_2, MAKERDAO_ABI_GROUP_3, YEARN_ABI_GROUP_1, YEARN_ABI_GROUP_2, YEARN_ABI_GROUP_3, YEARN_ABI_GROUP_4]  # noqa: E501
        cursor.execute('SELECT COUNT(*) FROM contract_abi')
        # len(eth_abi) + contracts_length - uniswap_NFT_MANAGER
        # - the 3 optimism contracts that share an ABI with mainnet
        # - the abis merged into the 7 abi groups + the 7 groups themselves
        assert cursor.fetchone()[0] == (
            15 +
            expected_contracts_length - 1 - 3 -
            sum(len(x) for x in groups) +
            len(groups)
        )

        # check balance scan, multicall and ds proxy registry are fine and in both chains
        cursor.execute('SELECT id from contract_abi WHERE name=?', ('BALANCE_SCAN',))
        balancescan_abi_id = cursor.fetchone()[0]
        cursor.execute('SELECT id from contract_abi WHERE name=?', ('MULTICALL2',))
        multicall_abi_id = cursor.fetchone()[0]
        cursor.execute('SELECT id from contract_abi WHERE name=?', ('DS_PROXY_REGISTRY',))
        ds_registry_abi_id = cursor.fetchone()[0]
        result = cursor.execute(
            'SELECT address, chain_id, abi, deployed_block FROM contract_data WHERE name=? ORDER BY chain_id',  # noqa: E501
            ('BALANCE_SCAN',),
        ).fetchall()
        assert result == [
            ('0x86F25b64e1Fe4C5162cDEeD5245575D32eC549db', 1, balancescan_abi_id, 9665853),
            ('0x1e21bc42FaF802A0F115dC998e2F0d522aDb1F68', 10, balancescan_abi_id, 46787373),
        ]
        result = cursor.execute(
            'SELECT address, chain_id, abi, deployed_block FROM contract_data WHERE name=? ORDER BY chain_id',  # noqa: E501
            ('MULTICALL2',),
        ).fetchall()
        assert result == [
            ('0x5BA1e12693Dc8F9c48aAD8770482f4739bEeD696', 1, multicall_abi_id, 12336033),
            ('0x2DC0E2aa608532Da689e89e237dF582B783E552C', 10, multicall_abi_id, 722566),
        ]
        result = cursor.execute(
            'SELECT address, chain_id, abi, deployed_block FROM contract_data WHERE name=? ORDER BY chain_id',  # noqa: E501
            ('DS_PROXY_REGISTRY',),
        ).fetchall()
        assert result == [
            ('0x4678f0a6958e4D2Bc4F1BAF7Bc52E8F3564f3fE4', 1, ds_registry_abi_id, 5834629),
            ('0x283Cc5C26e53D66ed2Ea252D986F094B37E6e895', 10, ds_registry_abi_id, 2944824),
        ]

        # check that the old names are not in there
        assert cursor.execute(
            'SELECT COUNT(*) from contract_abi WHERE name in (?, ?, ?)',
            ('ETH_SCAN', 'ETH_MULTICALL', 'ETH_MULTICALL2'),
        ).fetchone()[0] == 0
        assert cursor.execute(
            'SELECT COUNT(*) from contract_data WHERE name in (?, ?, ?)',
            ('ETH_SCAN', 'ETH_MULTICALL', 'ETH_MULTICALL2'),
        ).fetchone()[0] == 0

        cursor.execute('SELECT COUNT(*) FROM asset_collections')
        assert cursor.fetchone()[0] == _count_sql_file_sentences('populate_asset_collections.sql')
        cursor.execute('SELECT COUNT(*) FROM multiasset_mappings')
        assert cursor.fetchone()[0] == _count_sql_file_sentences('populate_multiasset_mappings.sql')  # noqa: E501
        assert GlobalDBHandler.get_schema_version() == 4

        # test that the blockchain column is nullable
        cursor.execute('INSERT INTO address_book(address, blockchain, name) VALUES ("0xc37b40ABdB939635068d3c5f13E7faF686F03B65", NULL, "yabir everywhere")')  # noqa: E501
        # test that address book entries were kept
        cursor.execute('SELECT * FROM address_book')
        assert cursor.fetchall() == [
            ('0xc37b40ABdB939635068d3c5f13E7faF686F03B65', 'ETH', 'yabir secret account'),
            ('0x2B888954421b424C5D3D9Ce9bB67c9bD47537d12', 'ETH', 'lefteris GTC'),
            ('0xc37b40ABdB939635068d3c5f13E7faF686F03B65', None, 'yabir everywhere'),
        ]

        # check that yearn tokens got their protocol updated
        cursor.execute('SELECT COUNT(*) from evm_tokens WHERE protocol="yearn-v1"')
        assert cursor.fetchone()[0] == 0
        cursor.execute('SELECT COUNT(*) from evm_tokens WHERE protocol=?', (YEARN_VAULTS_V1_PROTOCOL,))  # noqa: E501
        assert cursor.fetchone()[0] == YEARN_V1_ASSETS_IN_V3
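
# Worked through, the counts asserted above come to: contract_data rows
# = 93 - 1 + 1 + 3 + 1 = 97, and contract_abi rows
# = 15 + 97 - 1 - 3 - sum(len(g) for g in groups) + len(groups), i.e. each of
# the 7 abi groups collapses its members into a single shared ABI entry.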


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v4_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [4])
@pytest.mark.parametrize('reload_user_assets', [False])
@freeze_time('2023-03-20')  # freezing time just to make sure comparisons of timestamps won't fail
def test_upgrade_v4_v5(globaldb: GlobalDBHandler):
    """Test the global DB upgrade from v4 to v5"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        assert globaldb.get_setting_value('version', 0) == 4
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name=?',
            ('default_rpc_nodes',),
        )
        assert cursor.fetchone()[0] == 0
        last_queried_ts = globaldb_get_general_cache_last_queried_ts_by_key(
            cursor=cursor,
            key_parts=(CacheType.CURVE_LP_TOKENS,),
        )
        assert last_queried_ts == Timestamp(1676727187)  # 1676727187 is just some random value in the db  # noqa: E501
        pool_tokens_in_global_db = globaldb_get_general_cache_keys_and_values_like(
            cursor=cursor,
            key_parts=(CacheType.CURVE_POOL_TOKENS,),
        )
        assert len(pool_tokens_in_global_db) > 0, 'There should be some pool tokens set'
        contracts_before_upgrade = cursor.execute(
            'SELECT address, chain_id, abi, deployed_block FROM contract_data',
        ).fetchall()
        assert len(contracts_before_upgrade) > 0, 'There should be some contracts'
        columns = [column_data[1] for column_data in cursor.execute('PRAGMA table_info(contract_data)')]  # noqa: E501
        assert 'name' in columns, 'The name column should be in the contract_data table'

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 5)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 5
    with globaldb.conn.read_ctx() as cursor:
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name=?',
            ('default_rpc_nodes',),
        )
        assert cursor.fetchone()[0] == 1
        cursor.execute('SELECT COUNT(*) FROM default_rpc_nodes')
        assert cursor.fetchone()[0] == 10

        # check that we have five nodes for each chain
        nodes_file_path = Path(__file__).resolve().parent.parent.parent.parent / 'data' / 'nodes.json'  # noqa: E501
        nodes_info = json.loads(nodes_file_path.read_text(encoding='utf8'))
        nodes_tuples_from_file = [
            (idx, node['name'], node['endpoint'], int(False), int(True), str(node['weight']), node['blockchain'])  # noqa: E501
            for idx, node in enumerate(nodes_info, start=1)
        ]
        nodes_tuples_from_db = cursor.execute('SELECT * FROM default_rpc_nodes').fetchall()
        assert nodes_tuples_from_db == nodes_tuples_from_file

        last_queried_ts = globaldb_get_general_cache_last_queried_ts_by_key(
            cursor=cursor,
            key_parts=(CacheType.CURVE_LP_TOKENS,),
        )
        assert last_queried_ts == Timestamp(0)
        pool_tokens_in_global_db = globaldb_get_general_cache_keys_and_values_like(
            cursor=cursor,
            key_parts=(CacheType.CURVE_POOL_TOKENS,),
        )
        assert len(pool_tokens_in_global_db) == 0, 'All curve pool tokens should have been deleted'
        contracts_after_upgrade = cursor.execute('SELECT * FROM contract_data').fetchall()
        assert contracts_after_upgrade == contracts_before_upgrade
        columns = [column_data[1] for column_data in cursor.execute('PRAGMA table_info(contract_data)')]  # noqa: E501
        assert 'name' not in columns, 'The name column should not be in the contract_data table'


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v5_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [5])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_upgrade_v5_v6(globaldb: GlobalDBHandler):
    """Test the global DB upgrade from v5 to v6"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        # check that the unique_cache table is not present in the database
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name=?',
            ('unique_cache',),
        )
        assert cursor.fetchone()[0] == 0
        # get the number of entries in general_cache before the upgrade
        cursor.execute('SELECT COUNT(*) FROM general_cache')
        gen_cache_content_before = cursor.fetchone()[0]
        # check that the content of the unique cache keys is still in general_cache
        unique_keys_to_num_entries = {}
        gen_cache_unique_key_content = 0
        for key_part in V5_V6_UPGRADE_UNIQUE_CACHE_KEYS:
            cursor.execute(
                'SELECT COUNT(*) FROM general_cache WHERE key LIKE ?',
                (f'{key_part.serialize()}%',),
            )
            unique_keys_to_num_entries[key_part] = int(cursor.fetchone()[0])
            gen_cache_unique_key_content += unique_keys_to_num_entries[key_part]

        # create a set of the old entries, ensuring that the rows have checksummed
        # addresses for the identifiers in multiasset_mappings
        old_multiasset_mappings = set()
        cursor.execute('SELECT * FROM multiasset_mappings ORDER BY collection_id, asset')
        for collection_id, asset_id in cursor:
            address = asset_id.split(':')[-1]
            checksummed_address = to_checksum_address(address)
            new_id = asset_id.replace(address, checksummed_address)
            old_multiasset_mappings.add((collection_id, new_id))

    with globaldb.conn.write_ctx() as write_cursor:
        # add some dummy data into the cache to verify behaviour during the transfer
        test_cache_key = next(iter(V5_V6_UPGRADE_UNIQUE_CACHE_KEYS)).serialize() + 'test'
        values = ['abc', 'xyz', '123']
        tuples = [(test_cache_key, value, ts_now()) for value in values]
        write_cursor.executemany(
            'INSERT OR REPLACE INTO general_cache '
            '(key, value, last_queried_ts) VALUES (?, ?, ?)',
            tuples,
        )
        # test that multiple combos of collection id and asset can be added before the upgrade
        write_cursor.execute(
            'INSERT INTO multiasset_mappings(collection_id, asset) VALUES(?, ?)',
            (7, 'eip155:1/erc20:0xD46bA6D942050d489DBd938a2C909A5d5039A161'),
        )
        write_cursor.execute(  # delete to preserve correctness and add 1 back
            'DELETE FROM multiasset_mappings WHERE collection_id=? AND asset=?',
            (7, 'eip155:1/erc20:0xD46bA6D942050d489DBd938a2C909A5d5039A161'),
        )
        write_cursor.execute(
            'INSERT INTO multiasset_mappings(collection_id, asset) VALUES(?, ?)',
            (7, 'eip155:1/erc20:0xD46bA6D942050d489DBd938a2C909A5d5039A161'),
        )

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 6)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 6
    with globaldb.conn.read_ctx() as cursor:
        # check that the unique_cache table is present in the database
        cursor.execute(
            'SELECT COUNT(*) FROM sqlite_master WHERE type="table" and name=?',
            ('unique_cache',),
        )
        assert cursor.fetchone()[0] == 1
        # check that only the first value of the dummy entry was transferred to unique_cache
        value = globaldb_get_unique_cache_value(cursor, (next(iter(V5_V6_UPGRADE_UNIQUE_CACHE_KEYS)), 'test'))  # type: ignore # noqa: E501
        assert value == values[0]

        with globaldb.conn.write_ctx() as write_cursor:
            # delete the dummy entry to maintain db consistency
            write_cursor.execute('DELETE FROM unique_cache WHERE key=?', (test_cache_key,))
            # Check we can't add an already existing collection_id + asset to multiasset_mappings
            with pytest.raises(IntegrityError):
                write_cursor.execute(
                    'INSERT INTO multiasset_mappings(collection_id, asset) VALUES(?, ?)',
                    (7, 'eip155:1/erc20:0xD46bA6D942050d489DBd938a2C909A5d5039A161'),
                )

        # check that the appropriate cache data was transferred
        for key_part in V5_V6_UPGRADE_UNIQUE_CACHE_KEYS:
            cursor.execute(
                'SELECT COUNT(*) FROM unique_cache WHERE key LIKE ?',
                (f'{key_part.serialize()}%',),
            )
            assert cursor.fetchone()[0] == unique_keys_to_num_entries[key_part]
            cursor.execute(
                'SELECT COUNT(*) FROM general_cache WHERE key LIKE ?',
                (f'{key_part.serialize()}%',),
            )
            assert cursor.fetchone()[0] == 0

        unique_cache_content = cursor.execute('SELECT COUNT(*) FROM unique_cache').fetchone()[0]
        # get the number of entries in general_cache after the upgrade
        gen_cache_content_after = cursor.execute('SELECT COUNT(*) FROM general_cache').fetchone()[0]  # noqa: E501
        assert gen_cache_unique_key_content == unique_cache_content
        assert gen_cache_content_before == gen_cache_content_after + gen_cache_unique_key_content
        assert set(cursor.execute(
            'SELECT * FROM multiasset_mappings ORDER BY collection_id, asset',
        )) == old_multiasset_mappings

        # test that the VELO asset got deleted
        assert cursor.execute(
            'SELECT COUNT(*) FROM assets WHERE identifier=?',
            ('VELO',),
        ).fetchone()[0] == 0
        assert cursor.execute(
            'SELECT COUNT(*) FROM common_asset_details WHERE identifier=?',
            ('VELO',),
        ).fetchone()[0] == 0
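
# Note on the LIKE patterns used in this test: cache keys are stored as the
# serialized CacheType immediately followed by any extra key parts, which is why
# the dummy entry built above as `.serialize() + 'test'` is matched by
# `f'{key_part.serialize()}%'` and retrieved as the tuple (<cache type>, 'test').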


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v6_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [6])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_upgrade_v6_v7(globaldb: GlobalDBHandler):
    """Test the global DB upgrade from v6 to v7"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        # check that the location_asset_mappings table is not present in the database
        assert table_exists(
            cursor=cursor,
            name='location_asset_mappings',
        ) is False
        # check that the location_unsupported_assets table is not present in the database
        assert table_exists(
            cursor=cursor,
            name='location_unsupported_assets',
        ) is False

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 7)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 7
    with globaldb.conn.read_ctx() as cursor:
        # check that the location_asset_mappings table is present in the database
        assert table_exists(
            cursor=cursor,
            name='location_asset_mappings',
            schema=DB_CREATE_LOCATION_ASSET_MAPPINGS,
        ) is True
        # check that the location_unsupported_assets table is present in the database
        assert table_exists(
            cursor=cursor,
            name='location_unsupported_assets',
            schema=DB_CREATE_LOCATION_UNSUPPORTED_ASSETS,
        ) is True

        # check that the correct number of exchange mappings is present. The exact
        # values can be reproduced by running this gist against the DB:
        # https://gist.github.com/OjusWiZard/0a9544ac4e985be08736cc3296e3e0d3
        for exchange, expected_mappings_count in (
            (None, 33),
            (Location.KRAKEN.serialize_for_db(), 228),
            (Location.POLONIEX.serialize_for_db(), 147),
            (Location.BITTREX.serialize_for_db(), 121),
            (Location.BINANCE.serialize_for_db(), 135),
            (Location.COINBASE.serialize_for_db(), 78),
            (Location.COINBASEPRO.serialize_for_db(), 82),
            (Location.GEMINI.serialize_for_db(), 26),
            (Location.CRYPTOCOM.serialize_for_db(), 0),
            (Location.BITSTAMP.serialize_for_db(), 16),
            (Location.BITFINEX.serialize_for_db(), 60),
            (Location.ICONOMI.serialize_for_db(), 36),
            (Location.KUCOIN.serialize_for_db(), 233),
            (Location.FTX.serialize_for_db(), 45),
            (Location.NEXO.serialize_for_db(), 2),
            (Location.BLOCKFI.serialize_for_db(), 5),
            (Location.UPHOLD.serialize_for_db(), 37),
            (Location.BITPANDA.serialize_for_db(), 41),
            (Location.OKX.serialize_for_db(), 65),
            (Location.WOO.serialize_for_db(), 32),
            (Location.BYBIT.serialize_for_db(), 78),
        ):
            assert cursor.execute(
                'SELECT COUNT(*) FROM location_asset_mappings WHERE location IS ?', (exchange,),
            ).fetchone()[0] == expected_mappings_count

        # check that the correct number of unsupported assets is present. The exact
        # values can be reproduced by running this gist against the DB:
        # https://gist.github.com/OjusWiZard/0a9544ac4e985be08736cc3296e3e0d3
        for location, expected_mappings_count in (
            (Location.BINANCE.serialize_for_db(), 22),
            (Location.BITFINEX.serialize_for_db(), 10),
            (Location.BITTREX.serialize_for_db(), 125),
            (Location.FTX.serialize_for_db(), 65),
            (Location.GEMINI.serialize_for_db(), 10),
            (Location.ICONOMI.serialize_for_db(), 4),
            (Location.KUCOIN.serialize_for_db(), 234),
            (Location.OKX.serialize_for_db(), 7),
            (Location.POLONIEX.serialize_for_db(), 133),
        ):
            assert cursor.execute(
                'SELECT COUNT(*) FROM location_unsupported_assets WHERE location = ?', (location,),
            ).fetchone()[0] == expected_mappings_count
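
# Note: the first loop above compares with 'location IS ?' rather than 'location = ?'
# because one of the checked values is None (the mappings with no specific location),
# and in SQLite NULL = NULL does not evaluate to true while NULL IS NULL does.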


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v7_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [7])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_upgrade_v7_v8(globaldb: GlobalDBHandler):
    """Test the global DB upgrade from v7 to v8"""
    # Check the state before upgrading
    with globaldb.conn.read_ctx() as cursor:
        assert cursor.execute(
            'SELECT protocol from evm_tokens where identifier=?',
            ('eip155:1/erc20:0xA0b73E1Ff0B80914AB6fe0444E65848C4C34450b',),
        ).fetchone()[0] == 'spam'
        assert cursor.execute(
            'SELECT COUNT(*) from general_cache where key=? AND value=?',
            ('SPAM_ASSET_FALSE_POSITIVE', 'eip155:1/erc20:0xA0b73E1Ff0B80914AB6fe0444E65848C4C34450b'),  # noqa: E501
        ).fetchone()[0] == 0
        assert cursor.execute(
            'SELECT COUNT(*) FROM contract_data WHERE address=?',
            ('0xAB392016859663Ce1267f8f243f9F2C02d93bad8',),
        ).fetchone()[0] == 1

        # check that the asset_collections table has duplicated entries
        unique_entries = {}
        duplicated_entries = set()
        for collection_id, name, symbol in cursor.execute('SELECT id, name, symbol FROM asset_collections;'):  # noqa: E501
            if (name, symbol) not in unique_entries:
                unique_entries[(name, symbol)] = collection_id
            else:
                duplicated_entries.add((collection_id, name, symbol))

        v7_multiasset_mappings = cursor.execute(
            'SELECT rowid, collection_id, asset FROM multiasset_mappings;',
        ).fetchall()
        cached_lp_tokens = set(cursor.execute('SELECT value FROM general_cache WHERE key LIKE "CURVE_LP_TOKENS%"').fetchall())  # noqa: E501
        cached_pool_tokens = set(cursor.execute('SELECT value FROM general_cache WHERE key LIKE "CURVE_POOL_TOKENS%"').fetchall())  # noqa: E501
        cached_underlying_tokens = set(cursor.execute('SELECT value FROM general_cache WHERE key LIKE "CURVE_POOL_UNDERLYING_TOKENS%"').fetchall())  # noqa: E501
        cached_gauges = set(cursor.execute('SELECT value FROM unique_cache WHERE key LIKE "CURVE_GAUGE_ADDRESS%"').fetchall())  # noqa: E501
        cached_pools = set(cursor.execute('SELECT value FROM unique_cache WHERE key LIKE "CURVE_POOL_ADDRESS%"').fetchall())  # noqa: E501
        assert len(cached_lp_tokens) == 973
        assert len(cached_pool_tokens) == 555
        assert len(cached_underlying_tokens) == 202
        assert len(cached_gauges) == 536
        assert len(cached_pools) == 973
        # the cache with the new keys doesn't exist yet
        assert cursor.execute('SELECT COUNT(*) FROM general_cache WHERE key LIKE "CURVE_LP_TOKENS1%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM general_cache WHERE key LIKE "CURVE_POOL_TOKENS1%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM unique_cache WHERE key LIKE "CURVE_GAUGE_ADDRESS1%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM unique_cache WHERE key LIKE "CURVE_POOL_ADDRESS1%"').fetchone()[0] == 0  # noqa: E501
        # before the update the cache is not eligible for a refresh, because last_queried_ts is ts_now()  # noqa: E501
        cursor.execute('UPDATE general_cache SET last_queried_ts=? WHERE key LIKE ?', (ts_now(), 'CURVE_LP_TOKENS%'))  # noqa: E501
        assert should_update_protocol_cache(CacheType.CURVE_LP_TOKENS) is False

        assert unique_entries['Wormhole Token', 'W'] == 263
        assert unique_entries['TokenFi', 'TOKEN'] == 264
        assert unique_entries['HTX', 'HTX'] == 265
        assert unique_entries['Kyber Network Crystal v2', 'KNC'] == 301
        assert unique_entries['PancakeSwap Token', 'CAKE'] == 302
        assert (262, 'Starknet', 'STRK') in duplicated_entries
        assert (303, 'Starknet', 'STRK') in duplicated_entries
        assert (304, 'Wormhole Token', 'W') in duplicated_entries
        assert (305, 'TokenFi', 'TOKEN') in duplicated_entries
        assert (306, 'HTX', 'HTX') in duplicated_entries

    # execute upgrade
    with ExitStack() as stack:
        patch_for_globaldb_upgrade_to(stack, 8)
        maybe_upgrade_globaldb(
            connection=globaldb.conn,
            global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
            db_filename=GLOBALDB_NAME,
        )

    assert globaldb.get_setting_value('version', 0) == 8
    with globaldb.conn.read_ctx() as cursor:
        assert cursor.execute(
            'SELECT protocol from evm_tokens where identifier=?',
            ('eip155:1/erc20:0xA0b73E1Ff0B80914AB6fe0444E65848C4C34450b',),
        ).fetchone()[0] is None
        assert cursor.execute(
            'SELECT COUNT(*) from general_cache where key=? AND value=?',
            ('SPAM_ASSET_FALSE_POSITIVE', 'eip155:1/erc20:0xA0b73E1Ff0B80914AB6fe0444E65848C4C34450b'),  # noqa: E501
        ).fetchone()[0] == 1

        # check that asset_collections now has unique entries with correctly mapped ids
        assert table_exists(
            cursor=cursor,
            name='asset_collections',
            schema=DB_CREATE_ASSET_COLLECTIONS,
        ) is True
        for collection_id, name, symbol in (
            (260, 'Moon App', 'APP'),
            (261, 'Starknet', 'STRK'),
            (262, 'Wormhole Token', 'W'),
            (263, 'TokenFi', 'TOKEN'),
            (264, 'HTX', 'HTX'),
            (300, 'Kyber Network Crystal v2', 'KNC'),
            (301, 'PancakeSwap Token', 'CAKE'),
        ):
            assert cursor.execute(
                'SELECT COUNT(*) FROM asset_collections WHERE id=? AND name=? AND symbol=?',
                (collection_id, name, symbol),
            ).fetchone()[0] == 1
        # and the multiasset_mappings are exactly the same
        assert v7_multiasset_mappings == cursor.execute(
            'SELECT rowid, collection_id, asset FROM multiasset_mappings;',
        ).fetchall()

        assert cursor.execute(
            'SELECT COUNT(*) FROM contract_data WHERE address=?',
            ('0xAB392016859663Ce1267f8f243f9F2C02d93bad8',),
        ).fetchone()[0] == 0
        assert cursor.execute(
            'SELECT COUNT(*) FROM contract_data WHERE address=?',
            ('0xc97EE9490F4e3A3136A513DB38E3C7b47e69303B',),
        ).fetchone()[0] == 1

        # the previous keys were deleted
        assert cursor.execute('SELECT COUNT(*) FROM general_cache WHERE key LIKE "CURVE_LP_TOKENS0x%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM general_cache WHERE key LIKE "CURVE_POOL_TOKENS0x%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM unique_cache WHERE key LIKE "CURVE_GAUGE_ADDRESS0x%"').fetchone()[0] == 0  # noqa: E501
        assert cursor.execute('SELECT COUNT(*) FROM unique_cache WHERE key LIKE "CURVE_POOL_ADDRESS0x%"').fetchone()[0] == 0  # noqa: E501
        # the values are the same as in v7, with the key now containing the chain id
        assert set(cursor.execute('SELECT value FROM general_cache WHERE key LIKE "CURVE_LP_TOKENS1%"').fetchall()) == cached_lp_tokens  # noqa: E501
        assert set(cursor.execute('SELECT value FROM general_cache WHERE key LIKE "CURVE_POOL_TOKENS1%"').fetchall()) == cached_pool_tokens  # noqa: E501
        assert set(cursor.execute('SELECT value FROM unique_cache WHERE key LIKE "CURVE_GAUGE_ADDRESS1%"').fetchall()) == cached_gauges  # noqa: E501
        assert set(cursor.execute('SELECT value FROM unique_cache WHERE key LIKE "CURVE_POOL_ADDRESS1%"').fetchall()) == cached_pools  # noqa: E501
        # CURVE_POOL_UNDERLYING_TOKENS should be deleted
        assert cursor.execute('SELECT COUNT(*) FROM general_cache WHERE key LIKE "CURVE_POOL_UNDERLYING_TOKENS%"').fetchone()[0] == 0  # noqa: E501
        # ensure that the curve cache is now eligible for an update
        assert should_update_protocol_cache(CacheType.CURVE_LP_TOKENS, '1') is True

    with (
        pytest.raises(IntegrityError),
        globaldb.conn.write_ctx() as write_cursor,
    ):
        write_cursor.execute(  # test that a unique error is raised when copying an existing abi
            'INSERT INTO contract_abi(id, value, name) SELECT 100, value, "yabir" FROM contract_abi WHERE id=1',  # noqa: E501
        )
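
# The key patterns above encode the v7 -> v8 curve cache migration: the chain id
# now follows the cache type in the key, so (presumably) a v7 key such as
# 'CURVE_POOL_TOKENS0x...' becomes 'CURVE_POOL_TOKENS10x...' on Ethereum mainnet
# (chain id 1). That is why the '...0x%' patterns match nothing after the upgrade
# while the same cached values reappear under the '...1%' patterns.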


@pytest.mark.parametrize('custom_globaldb', ['v2_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [2])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_unfinished_upgrades(globaldb: GlobalDBHandler):
    assert globaldb.used_backup is False
    globaldb.add_setting_value(  # Pretend that an upgrade was started
        name='ongoing_upgrade_from_version',
        value=2,
    )
    # There are no backups, so it is supposed to raise an error
    with pytest.raises(DBUpgradeError):
        create_globaldb(globaldb._data_directory, 0)

    globaldb.conn.execute('PRAGMA wal_checkpoint;')  # flush the wal file

    # Add a backup
    backup_path = globaldb._data_directory / GLOBALDIR_NAME / f'{ts_now()}_global_db_v2.backup'  # type: ignore # _data_directory is definitely not null here  # noqa: E501
    shutil.copy(Path(__file__).parent.parent.parent / 'data' / 'v2_global.db', backup_path)
    backup_connection = DBConnection(
        path=str(backup_path),
        connection_type=DBConnectionType.GLOBAL,
        sql_vm_instructions_cb=0,
    )
    with backup_connection.write_ctx() as write_cursor:
        write_cursor.execute('INSERT INTO settings VALUES("is_backup", "Yes")')  # mark as a backup  # noqa: E501

    globaldb = create_globaldb(globaldb._data_directory, 0)  # Now the backup should be used
    assert globaldb.used_backup is True
    # Check that there is no setting left
    assert globaldb.get_setting_value('ongoing_upgrade_from_version', -1) == -1
    with globaldb.conn.read_ctx() as cursor:
        assert cursor.execute('SELECT value FROM settings WHERE name="is_backup"').fetchone()[0] == 'Yes'  # noqa: E501
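
# The recovery path exercised above depends on two things visible in the test:
# the backup living under the global directory with the
# '<timestamp>_global_db_v<version>.backup' naming scheme, and the 'is_backup'
# settings row, which is only used here as a marker so the final assertion can
# prove that the restored DB really is the backup copy.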


@pytest.mark.parametrize('globaldb_upgrades', [[]])
@pytest.mark.parametrize('custom_globaldb', ['v2_global.db'])
@pytest.mark.parametrize('target_globaldb_version', [2])
@pytest.mark.parametrize('reload_user_assets', [False])
def test_applying_all_upgrade(globaldb: GlobalDBHandler):
    """Test globalDB upgrade from v2 to latest"""
    # Check the state before upgrading
    assert globaldb.get_setting_value('version', 0) == 2
    with globaldb.conn.cursor() as cursor:
        assert cursor.execute('SELECT COUNT(*) from assets WHERE identifier="eip155:/erc20:0x32c6fcC9bC912C4A30cd53D2E606461e44B77AF2"').fetchone()[0] == 0  # noqa: E501

    maybe_upgrade_globaldb(
        connection=globaldb.conn,
        global_dir=globaldb._data_directory / GLOBALDIR_NAME,  # type: ignore
        db_filename=GLOBALDB_NAME,
    )

    assert globaldb.get_setting_value('version', 0) == GLOBAL_DB_VERSION
    with globaldb.conn.cursor() as cursor:
        assert cursor.execute('SELECT COUNT(*) from assets WHERE identifier="eip155:/erc20:0x32c6fcC9bC912C4A30cd53D2E606461e44B77AF2"').fetchone()[0] == 1  # noqa: E501