/*
* Copyright (C) 2015 Matias Bjorling. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*/
#include <linux/lightnvm.h>
#define MAX_SYSBLKS 3 /* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
* enables ~1.5M updates per sysblk unit
*/
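
/*
 * Worked out, the estimate above is 2 blks * 256 pages * 3000 erase
 * cycles = 1,536,000 page writes, i.e. ~1.5M sysblk updates per row
 * (the page and erase counts are illustrative figures from the comment
 * above, not values read from the device).
 */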
struct sysblk_scan {
/* A row is a collection of flash blocks for a system block. */
int nr_rows;
int row;
int act_blk[MAX_SYSBLKS];
int nr_ppas;
struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};
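
/*
 * ppas[] is laid out row-major: row r, block b lives at index
 * r * MAX_BLKS_PR_SYSBLK + b. With MAX_BLKS_PR_SYSBLK = 2, for example,
 * row 1 / block 0 maps to index 2 and row 2 / block 1 to index 5.
 */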
static inline int scan_ppa_idx(int row, int blkid)
{
return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}
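
/*
 * struct nvm_system_block is stored on media in big-endian form; the two
 * helpers below convert between the on-media and host (cpu) layouts.
 */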
void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
info->seqnr = be32_to_cpu(sb->seqnr);
info->erase_cnt = be32_to_cpu(sb->erase_cnt);
info->version = be16_to_cpu(sb->version);
strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}
void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
sb->seqnr = cpu_to_be32(info->seqnr);
sb->erase_cnt = cpu_to_be32(info->erase_cnt);
sb->version = cpu_to_be16(info->version);
strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}
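
/*
 * Example placement: on an 8-channel device the rows land on channels
 * 0, 8 / 2 = 4 and 8 - 1 = 7; a 2-channel device gets rows on channels
 * 0 and 1, and a 1-channel device gets a single row on channel 0.
 */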
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
int i;
for (i = 0; i < nr_rows; i++)
sysblk_ppas[i].ppa = 0;
/* if possible, place sysblk at first channel, middle channel and last
* channel of the device. If not, create only one or two sys blocks
*/
switch (dev->nr_chnls) {
case 2:
sysblk_ppas[1].g.ch = 1;
/* fall-through */
case 1:
sysblk_ppas[0].g.ch = 0;
break;
default:
sysblk_ppas[0].g.ch = 0;
sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
break;
}
return nr_rows;
}
void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *sysblk_ppas)
{
memset(s, 0, sizeof(struct sysblk_scan));
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
int i, nr_sysblk = 0;
nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
if (nr_blks < 0)
return nr_blks;
for (i = 0; i < nr_blks; i++) {
if (blks[i] != NVM_BLK_T_HOST)
continue;
if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
pr_err("nvm: too many host blks\n");
return -EINVAL;
}
ppa.g.blk = i;
s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
s->nr_ppas++;
nr_sysblk++;
}
return 0;
}
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
struct ppa_addr *ppas, nvm_bb_update_fn *fn)
{
struct ppa_addr dppa;
int i, ret = 0;
s->nr_ppas = 0;
for (i = 0; i < s->nr_rows; i++) {
dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;
ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
ppas[i].g.blk);
return ret;
}
}
return ret;
}
/*
 * Scans a block for the latest sysblk.
 * Returns:
 *	0 - newer sysblk not found. PPA is updated to latest page.
 *	1 - newer sysblk found and stored in *sblk. PPA is updated to
 *	    next valid page.
 *	<0 - error.
*/
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
struct nvm_system_block *cur;
int pg, ret, found = 0;
	/* the full buffer for a flash page is allocated; only the beginning
	 * of it contains the system block information
	 */
cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
/* perform linear scan through the block */
for (pg = 0; pg < dev->lps_per_blk; pg++) {
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
cur, dev->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
ppa->g.ch,
ppa->g.lun,
ppa->g.blk,
ppa->g.pg);
break;
}
pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)",
ret,
ppa->g.ch,
ppa->g.lun,
ppa->g.blk,
ppa->g.pg);
break; /* if we can't read a page, continue to the
* next blk
*/
}
if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
ppa->g.ch,
ppa->g.lun,
ppa->g.blk,
ppa->g.pg);
break; /* last valid page already found */
}
if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
continue;
memcpy(sblk, cur, sizeof(struct nvm_system_block));
found = 1;
}
kfree(cur);
return found;
}
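
/*
 * Mark all scanned sysblk ppas with the given block type in a single
 * set_bb_tbl command. Requests larger than max_phys_sect are refused so
 * the marking happens atomically: either every sysblk entry is
 * (re)marked or none is.
 */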
static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
struct nvm_rq rqd;
int ret;
if (s->nr_ppas > dev->ops->max_phys_sect) {
pr_err("nvm: unable to update all sysblocks atomically\n");
return -EINVAL;
}
memset(&rqd, 0, sizeof(struct nvm_rq));
nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
nvm_generic_to_addr_mode(dev, &rqd);
ret = dev->ops->set_bb_tbl(dev, &rqd, type);
nvm_free_rqd_ppalist(dev, &rqd);
if (ret) {
pr_err("nvm: sysblk failed bb mark\n");
return -EINVAL;
}
return 0;
}
static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
u8 *blks, int nr_blks, void *private)
{
struct sysblk_scan *s = private;
struct ppa_addr *sppa;
int i, blkid = 0;
nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
if (nr_blks < 0)
return nr_blks;
for (i = 0; i < nr_blks; i++) {
if (blks[i] == NVM_BLK_T_HOST)
return -EEXIST;
if (blks[i] != NVM_BLK_T_FREE)
continue;
sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
sppa->g.ch = ppa.g.ch;
sppa->g.lun = ppa.g.lun;
sppa->g.blk = i;
s->nr_ppas++;
blkid++;
pr_debug("nvm: use (%u %u %u) as sysblk\n",
sppa->g.ch, sppa->g.lun, sppa->g.blk);
		if (blkid >= MAX_BLKS_PR_SYSBLK)
return 0;
}
pr_err("nvm: sysblk failed get sysblk\n");
return -EINVAL;
}
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
struct nvm_system_block nvmsb;
void *buf;
int i, sect, ret = 0;
struct ppa_addr *ppas;
nvm_cpu_to_sysblk(&nvmsb, info);
buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas) {
ret = -ENOMEM;
goto err;
}
/* Write and verify */
for (i = 0; i < s->nr_rows; i++) {
ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];
pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
ppas[0].g.ch,
ppas[0].g.lun,
ppas[0].g.blk,
ppas[0].g.pg);
/* Expand to all sectors within a flash page */
if (dev->sec_per_pg > 1) {
for (sect = 1; sect < dev->sec_per_pg; sect++) {
ppas[sect].ppa = ppas[0].ppa;
ppas[sect].g.sec = sect;
}
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
ppas[0].g.lun,
ppas[0].g.blk);
break;
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
ppas[0].g.lun,
ppas[0].g.blk);
break;
}
if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
pr_err("nvm: sysblk failed verify (%u %u %u)\n",
ppas[0].g.ch,
ppas[0].g.lun,
ppas[0].g.blk);
ret = -EINVAL;
break;
}
}
kfree(ppas);
err:
kfree(buf);
return ret;
}
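
/*
 * Rotate each row to its successor block (modulo MAX_BLKS_PR_SYSBLK):
 * erase it, point the cached ppa at SLC page 0 so the next write lands
 * at the start of the block, and record it as the row's active block.
 */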
static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
int i, ret;
unsigned long nxt_blk;
struct ppa_addr *ppa;
for (i = 0; i < s->nr_rows; i++) {
nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
ppa->g.pg = ppa_to_slc(dev, 0);
ret = nvm_erase_ppa(dev, ppa, 1);
if (ret)
return ret;
s->act_blk[i] = nxt_blk;
}
return 0;
}
int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
struct nvm_system_block *cur;
int i, j, found = 0;
int ret = -ENOMEM;
/*
* 1. setup sysblk locations
* 2. get bad block list
* 3. filter on host-specific (type 3)
* 4. iterate through all and find the highest seq nr.
* 5. return superblock information
*/
if (!dev->ops->get_bb_tbl)
return -EINVAL;
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
if (ret)
goto err_sysblk;
/* no sysblocks initialized */
if (!s.nr_ppas)
goto err_sysblk;
	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}
/* find the latest block across all sysblocks */
for (i = 0; i < s.nr_rows; i++) {
for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];
ret = nvm_scan_block(dev, &ppa, cur);
if (ret > 0)
found = 1;
else if (ret < 0)
break;
}
}
nvm_sysblk_to_cpu(info, cur);
kfree(cur);
err_sysblk:
mutex_unlock(&dev->mlock);
if (found)
return 1;
return ret;
}
int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
/* 1. for each latest superblock
* 2. if room
* a. write new flash page entry with the updated information
* 3. if no room
* a. find next available block on lun (linear search)
* if none, continue to next lun
* if none at all, report error. also report that it wasn't
* possible to write to all superblocks.
	 *    b. write data to block.
*/
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
struct nvm_system_block *cur;
int i, j, ppaidx, found = 0;
int ret = -ENOMEM;
if (!dev->ops->get_bb_tbl)
return -EINVAL;
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
if (ret)
goto err_sysblk;
	cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
	if (!cur) {
		ret = -ENOMEM;
		goto err_sysblk;
	}
/* Get the latest sysblk for each sysblk row */
for (i = 0; i < s.nr_rows; i++) {
found = 0;
for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
ppaidx = scan_ppa_idx(i, j);
ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
if (ret > 0) {
s.act_blk[i] = j;
found = 1;
} else if (ret < 0)
break;
}
}
if (!found) {
pr_err("nvm: no valid sysblks found to update\n");
ret = -EINVAL;
goto err_cur;
}
/*
	 * All sysblocks found. Check that they have the same page id in their
	 * flash blocks.
*/
for (i = 1; i < s.nr_rows; i++) {
struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];
if (l.g.pg != r.g.pg) {
pr_err("nvm: sysblks not on same page. Previous update failed.\n");
ret = -EINVAL;
goto err_cur;
}
}
/*
	 * Check that there hasn't been another update to the seqnr since we
	 * began.
*/
if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
pr_err("nvm: seq is not sequential\n");
ret = -EINVAL;
goto err_cur;
}
/*
	 * When all pages in a block have been written, a new block is selected
* and writing is performed on the new block.
*/
if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
dev->lps_per_blk - 1) {
ret = nvm_prepare_new_sysblks(dev, &s);
if (ret)
goto err_cur;
}
ret = nvm_write_and_verify(dev, new, &s);
err_cur:
kfree(cur);
err_sysblk:
mutex_unlock(&dev->mlock);
return ret;
}
int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
int ret;
/*
	 * 1. select master blocks and the first available blks
* 2. get bad block list
* 3. mark MAX_SYSBLKS block as host-based device allocated.
* 4. write and verify data to block
*/
if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
return -EINVAL;
if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
pr_err("nvm: memory does not support SLC access\n");
return -EINVAL;
}
/* Index all sysblocks and mark them as host-driven */
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
if (ret)
goto err_mark;
ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
if (ret)
goto err_mark;
/* Write to the first block of each row */
ret = nvm_write_and_verify(dev, info, &s);
err_mark:
mutex_unlock(&dev->mlock);
return ret;
}
struct factory_blks {
struct nvm_dev *dev;
int flags;
unsigned long *blks;
};
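
/*
 * Round the per-lun block count up to a multiple of BITS_PER_LONG so
 * every lun owns a whole number of longs in the bitmap. E.g. on a
 * 64-bit machine, 1020 blks_per_lun rounds up to 1024 bits.
 */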
static int factory_nblks(int nblks)
{
/* Round up to nearest BITS_PER_LONG */
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
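
/*
 * Offset, in longs, of a lun's slice of the bitmap. E.g. with 64-bit
 * longs, 1024 rounded bits per lun and 4 luns per channel, (ch 1,
 * lun 2) starts at (1 * 4 + 2) * 1024 / 64 = long index 96.
 */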
static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
{
int nblks = factory_nblks(dev->blks_per_lun);
return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
BITS_PER_LONG;
}
static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
u8 *blks, int nr_blks, void *private)
{
struct factory_blks *f = private;
int i, lunoff;
nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
if (nr_blks < 0)
return nr_blks;
lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
	/* bits that are not set correspond to blocks that must be erased */
for (i = 0; i < nr_blks; i++) {
switch (blks[i]) {
case NVM_BLK_T_FREE:
if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
set_bit(i, &f->blks[lunoff]);
break;
case NVM_BLK_T_HOST:
if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
set_bit(i, &f->blks[lunoff]);
break;
case NVM_BLK_T_GRWN_BAD:
if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
set_bit(i, &f->blks[lunoff]);
break;
default:
set_bit(i, &f->blks[lunoff]);
break;
}
}
return 0;
}
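
/*
 * Drain the to-be-erased bitmap into erase_list, taking at most one
 * block from each (channel, lun) per pass, so a single erase batch is
 * spread across luns instead of hammering one lun at a time.
 */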
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
int max_ppas, struct factory_blks *f)
{
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
unsigned long *offset;
while (!done) {
done = 1;
for (ch = 0; ch < dev->nr_chnls; ch++) {
for (lun = 0; lun < dev->luns_per_chnl; lun++) {
idx = factory_blk_offset(dev, ch, lun);
offset = &f->blks[idx];
blkid = find_first_zero_bit(offset,
dev->blks_per_lun);
if (blkid >= dev->blks_per_lun)
continue;
set_bit(blkid, offset);
ppa.ppa = 0;
ppa.g.ch = ch;
ppa.g.lun = lun;
ppa.g.blk = blkid;
pr_debug("nvm: erase ppa (%u %u %u)\n",
ppa.g.ch,
ppa.g.lun,
ppa.g.blk);
erase_list[ppa_cnt] = ppa;
ppa_cnt++;
done = 0;
if (ppa_cnt == max_ppas)
return ppa_cnt;
}
}
}
return ppa_cnt;
}
static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
nvm_bb_update_fn *fn, void *priv)
{
struct ppa_addr dev_ppa;
int ret;
dev_ppa = generic_to_dev_addr(dev, ppa);
ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
if (ret)
pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);
return ret;
}
static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
{
int ch, lun, ret;
struct ppa_addr ppa;
ppa.ppa = 0;
for (ch = 0; ch < dev->nr_chnls; ch++) {
for (lun = 0; lun < dev->luns_per_chnl; lun++) {
ppa.g.ch = ch;
ppa.g.lun = lun;
ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
f);
if (ret)
return ret;
}
}
return 0;
}
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
struct factory_blks f;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
GFP_KERNEL);
if (!f.blks)
return ret;
ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas)
goto err_blks;
f.dev = dev;
f.flags = flags;
/* create list of blks to be erased */
ret = nvm_fact_select_blks(dev, &f);
if (ret)
goto err_ppas;
	/* continue to erase until the list of blks is empty */
while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
nvm_erase_ppa(dev, ppas, ppa_cnt);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
sysblk_get_host_blks);
if (!ret)
ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
}
err_ppas:
kfree(ppas);
err_blks:
kfree(f.blks);
return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
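
/*
 * Hypothetical caller flow, for illustration only (the "gennvm" mmtype
 * string and the error handling are assumptions, not something this
 * file dictates):
 *
 *	struct nvm_sb_info info = { .version = 1 };
 *
 *	strncpy(info.mmtype, "gennvm", NVM_MMTYPE_LEN);
 *	nvm_init_sysblock(dev, &info);		(first-time provisioning)
 *
 *	nvm_get_sysblock(dev, &info);		(returns 1 when found)
 *
 *	info.seqnr++;				(must advance by exactly 1)
 *	nvm_update_sysblock(dev, &info);
 */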