// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// WARNING: This file is machine generated by fidlc.
#pragma once
#include <zircon/compiler.h>
#include <zircon/device/nand.h>
#include <zircon/types.h>
__BEGIN_CDECLS;
// Forward declarations
// nand_operation_t's are submitted for processing via the queue() method of the
// nand protocol. Once submitted, the contents of the nand_operation_t may be
// modified while it is being processed.
// The callback passed to queue() must eventually be called upon success or
// failure, and it receives the same cookie value that was supplied when the
// nand_operation_t was originally queued.
// Any mention of "in pages" in this file means nand pages, as reported by
// nand_info.page_size, as opposed to physical memory pages (RAM). That's true
// even for vmo-related values.
// corrected_bit_flips is always related to nand_info.ecc_bits, so it is
// possible to obtain a value that is larger than the amount of data being read
// (in the OOB case). On the other hand, if errors cannot be corrected, the
// operation will fail, and corrected_bit_flips will be undefined.
// NOTE: The protocol can be extended with barriers to support controllers that
// may issue multiple simultaneous requests to the IO chips.
typedef uint32_t nand_op_t;
#define NAND_OP_READ UINT32_C(1)
#define NAND_OP_WRITE UINT32_C(2)
#define NAND_OP_ERASE UINT32_C(3)
typedef struct nand_read_write nand_read_write_t;
typedef struct nand_erase nand_erase_t;
typedef union nand_operation nand_operation_t;
typedef struct nand_protocol nand_protocol_t;
typedef void (*nand_queue_callback)(void* ctx, zx_status_t s, nand_operation_t* op);
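// For illustration only (not part of the protocol): a minimal sketch of a
// client-side completion callback matching nand_queue_callback. The names
// my_io_ctx_t and on_nand_done are hypothetical; |ctx| receives the cookie
// value that was passed to queue().
//
//   #include <lib/sync/completion.h>
//
//   typedef struct {
//       zx_status_t status;
//       sync_completion_t done;
//   } my_io_ctx_t;
//
//   static void on_nand_done(void* ctx, zx_status_t status, nand_operation_t* op) {
//       my_io_ctx_t* io = (my_io_ctx_t*)ctx;  // The cookie handed to queue().
//       io->status = status;
//       sync_completion_signal(&io->done);    // Wake the submitter.
//   }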
// Declarations
// A single operation can read or write an arbitrary number of pages,
// including out of band (OOB) data for each page. If either regular
// data or OOB is not required, the relevant VMO handle should be set to
// ZX_HANDLE_INVALID.
// Note that length dictates the number of pages to access, regardless
// of the type of data requested: regular data, OOB or both.
// The OOB data will be copied to (and from) a contiguous memory range
// starting at the given offset. Note that said offset is given in nand
// pages even though OOB is just a handful of bytes per page. In other
// words, after said offset, the OOB data for each page is located
// nand_info.oob_size bytes apart.
// For example, to read 5 pages' worth of data + OOB, with a page size of
// 2 kB and 16 bytes of OOB per page, setting:
// data_vmo = oob_vmo = vmo_handle
// length = 5
// offset_nand = 20
// offset_data_vmo = 0
// offset_oob_vmo = 5
// will transfer pages [20, 24] to the first 2048 * 5 bytes of the vmo,
// followed by 16 * 5 bytes of OOB data starting at offset 2048 * 5.
struct nand_read_write {
    // Command.
    nand_op_t command;
    // vmo of data to read or write.
    zx_handle_t data_vmo;
    // vmo of OOB data to read or write.
    zx_handle_t oob_vmo;
    // Number of pages to access.
    // (0 is invalid).
    uint32_t length;
    // Offset into nand, in pages.
    uint32_t offset_nand;
    // Data vmo offset in (nand) pages.
    uint64_t offset_data_vmo;
    // OOB vmo offset in (nand) pages.
    uint64_t offset_oob_vmo;
    // Optional physical page list.
    uint64_t* page_list;
    size_t page_count;
    // Return value from NAND_OP_READ, max corrected bit flips in any
    // underlying ECC chunk read. The caller can compare this value
    // against ecc_bits to decide whether the nand erase block needs to
    // be recycled.
    uint32_t corrected_bit_flips;
};
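// A hedged sketch of the 5-page example above, expressed as field assignments
// (|vmo| is a hypothetical VMO large enough for data + OOB; in practice these
// fields live inside the nand_operation_t that is submitted to queue()):
//
//   nand_read_write_t rw = {
//       .command = NAND_OP_READ,
//       .data_vmo = vmo,       // Regular data starts at VMO byte 0.
//       .oob_vmo = vmo,        // OOB shares the same VMO in this example.
//       .length = 5,           // 5 nand pages.
//       .offset_nand = 20,     // Start at nand page 20.
//       .offset_data_vmo = 0,
//       .offset_oob_vmo = 5,   // In nand pages: OOB starts at VMO byte 5 * 2048.
//   };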
struct nand_erase {
    // Command.
    nand_op_t command;
    // Offset into nand, in erase blocks.
    uint32_t first_block;
    // Number of blocks to erase.
    // (0 is invalid).
    uint32_t num_blocks;
};
union nand_operation {
    // All Commands.
    nand_op_t command;
    // NAND_OP_READ, NAND_OP_WRITE.
    nand_read_write_t rw;
    // NAND_OP_ERASE.
    nand_erase_t erase;
};
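// A hedged sketch of an erase request expressed through the union; the block
// numbers are illustrative only:
//
//   nand_operation_t op = {0};
//   op.erase.command = NAND_OP_ERASE;
//   op.erase.first_block = 8;  // First erase block to erase.
//   op.erase.num_blocks = 2;   // Erases blocks 8 and 9.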
typedef struct nand_protocol_ops {
    void (*query)(void* ctx, nand_info_t* out_info, size_t* out_nand_op_size);
    void (*queue)(void* ctx, nand_operation_t* op, nand_queue_callback callback, void* cookie);
    zx_status_t (*get_factory_bad_block_list)(void* ctx, uint32_t* out_bad_blocks_list,
                                              size_t bad_blocks_count,
                                              size_t* out_bad_blocks_actual);
} nand_protocol_ops_t;
struct nand_protocol {
    nand_protocol_ops_t* ops;
    void* ctx;
};
// Obtains the parameters of the nand device (nand_info_t) and the required
// size of nand operations. The operations submitted via queue() must have
// out_nand_op_size - sizeof(nand_operation_t) bytes available at the end of
// the structure for the use of the driver.
static inline void nand_query(const nand_protocol_t* proto, nand_info_t* out_info,
                              size_t* out_nand_op_size) {
    proto->ops->query(proto->ctx, out_info, out_nand_op_size);
}
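// A hedged usage sketch, assuming |proto| was obtained from the parent device;
// the operation buffer must be |op_size| bytes in total, which may be larger
// than sizeof(nand_operation_t):
//
//   nand_info_t info;
//   size_t op_size;
//   nand_query(proto, &info, &op_size);
//   nand_operation_t* op = (nand_operation_t*)calloc(1, op_size);  // <stdlib.h>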
// Submits an IO request for processing. Success or failure will be reported
// via the callback passed to queue(), which receives |cookie| as its first
// argument. The callback may be called before the queue() method returns.
static inline void nand_queue(const nand_protocol_t* proto, nand_operation_t* op,
                              nand_queue_callback callback, void* cookie) {
    proto->ops->queue(proto->ctx, op, callback, cookie);
}
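// A hedged usage sketch, assuming |proto|, an |op| allocated as described for
// nand_query() above, and the hypothetical my_io_ctx_t/on_nand_done sketch
// near the top of this file; |&io| is the cookie and comes back to the
// callback unchanged:
//
//   my_io_ctx_t io = {0};
//   nand_queue(proto, op, on_nand_done, &io);
//   sync_completion_wait(&io.done, ZX_TIME_INFINITE);  // May complete before queue() returns.
//   // io.status now holds the result; |op| may be reused or freed.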
// Gets the list of bad erase blocks, as reported by the nand manufacturer.
// The caller must allocate a table large enough to hold the expected number
// of entries, and pass its capacity (in entries) in |bad_blocks_count|.
// On return, |out_bad_blocks_actual| contains the number of bad blocks found.
// This should only be called before writing any data to the nand, and the
// returned data should be saved somewhere else, along with blocks that become
// bad after they've been in use.
static inline zx_status_t nand_get_factory_bad_block_list(const nand_protocol_t* proto,
                                                          uint32_t* out_bad_blocks_list,
                                                          size_t bad_blocks_count,
                                                          size_t* out_bad_blocks_actual) {
    return proto->ops->get_factory_bad_block_list(proto->ctx, out_bad_blocks_list,
                                                  bad_blocks_count, out_bad_blocks_actual);
}
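// A hedged usage sketch; the table size (32) is illustrative and should be
// sized for the device's expected factory bad block count:
//
//   uint32_t bad_blocks[32];
//   size_t num_bad_blocks = 0;
//   zx_status_t status = nand_get_factory_bad_block_list(
//       proto, bad_blocks, sizeof(bad_blocks) / sizeof(bad_blocks[0]), &num_bad_blocks);
//   if (status == ZX_OK) {
//       // bad_blocks[0 .. num_bad_blocks - 1] hold the factory-marked bad erase blocks.
//   }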
__END_CDECLS;