drivers/misc/block_cache: Add a block driver that provides caching for existing block devices.

Adds an optional block device driver which can encapsulate an existing block device and provide block-based caching.
This commit is contained in:
Stuart Ianna 2023-09-05 12:25:05 +10:00
parent 4fe1458a43
commit dece509b2a
11 changed files with 1249 additions and 5 deletions

View file

@@ -42,5 +42,17 @@ Block device drivers have these properties:
*Example*: See the ``cmd_dd()`` implementation in
``apps/nshlib/nsh_ddcmd.c``.
- **Caching Block Driver** Any instantiated block device can
be encapsulated in a secondary device which uses a predefined
amount of available RAM to provide read-ahead and write buffering.
The secondary block driver then becomes the mountpoint for
the target filesystem; a minimal usage sketch follows this list.
Beware that any data not yet flushed to the underlying device,
through ``umount`` or other filesystem mechanisms, will be lost
if the device is removed unexpectedly.
See ``boards/risc-v/litex/arty_a7/src/litex_sdio.c`` and
``boards/risc-v/litex/arty_a7/Kconfig`` for example
usage and configuration.
- **Examples**. ``drivers/loop.c``,
``drivers/mmcsd/mmcsd_spi.c``, ``drivers/ramdisk.c``, etc.
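A minimal usage sketch is shown below. The helper name, mount path and
sizing values are illustrative assumptions only; ``block_cache_initialize()``
and its parameters come from ``include/nuttx/drivers/block_cache.h``::

    #include <sys/mount.h>

    #include <nuttx/drivers/block_cache.h>

    int board_cache_and_mount(void) /* hypothetical board helper */
    {
      int ret;

      /* Wrap the raw device in a caching device. With 512 byte blocks,
       * a cache width of 64 and 16 windows, the cache consumes
       * 512 * 64 * 16 = 512 KiB of RAM.
       */

      ret = block_cache_initialize("/dev/mmcsd0",  /* existing device */
                                   "/dev/bmmcsd0", /* cached device */
                                   64,             /* cache width */
                                   16,             /* cache count */
                                   1);             /* geometry multiplier */
      if (ret < 0)
        {
          return ret;
        }

      /* Mount the cached device; a later umount() flushes dirty windows */

      return mount("/dev/bmmcsd0", "/mnt/sd", "vfat", 0, NULL);
    }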

View file

@@ -45,9 +45,52 @@ config LITEX_SDIO_MOUNT
bool "Mount SDIO at startup"
default n
config LITEX_SDIO_USE_CACHE
bool "Use block caching"
default n
depends on LITEX_SDIO
depends on BLOCK_CACHE
---help---
Use block caching to improve SD performance.
Warning: This may result in data loss if removable media is detached
without flushing the cache, via umount or other mechanisms.
The total RAM consumed by caching can be calculated as:
B * W * C
where B = the underlying device's block size, W = the cache width and C is
the number of caches. For example, with 512 byte blocks, a cache width
of 64 and 16 caches, the cache consumes 512 KiB of RAM.
if LITEX_SDIO_USE_CACHE
config LITEX_SDIO_CACHE_WIDTH
int "Block cache width"
default 64
---help---
The width of a single cache window. This translates to the size of a single
read-ahead buffer, and how many blocks are written to the underlying block
device in a single transaction.
config LITEX_SDIO_CACHE_COUNT
int "Block cache count"
default 16
---help---
The number of cache windows to use. Higher numbers improve performance by
caching blocks at different offsets in the underlying device.
config LITEX_SDIO_CACHE_MULTIPLIER
int "Block cache multiplier"
default 1
---help---
Multiplies the underlying device's geometry to expose it in larger blocks.
endif
config LITEX_SDIO_MOUNT_BLKDEV
string "SDIO block device name"
default "/dev/mmcsd0"
default "/dev/mmcsd0" if !LITEX_SDIO_USE_CACHE
default "/dev/bmmcsd0" if LITEX_SDIO_USE_CACHE
depends on LITEX_SDIO
config LITEX_SDIO_MOUNT_MOUNTPOINT
string "SDIO mountpoint"

View file

@@ -5,6 +5,7 @@
# You can then do "make savedefconfig" to generate a new defconfig file that includes your
# modifications.
#
# CONFIG_DISABLE_PSEUDOFS_OPERATIONS is not set
# CONFIG_DISABLE_PTHREAD is not set
# CONFIG_FS_PROCFS_EXCLUDE_BLOCKS is not set
# CONFIG_FS_PROCFS_EXCLUDE_ENVIRON is not set
@@ -17,8 +18,42 @@
# CONFIG_FS_PROCFS_EXCLUDE_USAGE is not set
# CONFIG_FS_PROCFS_EXCLUDE_VERSION is not set
# CONFIG_NSH_DISABLEBG is not set
# CONFIG_NSH_DISABLE_BASENAME is not set
# CONFIG_NSH_DISABLE_CAT is not set
# CONFIG_NSH_DISABLE_CD is not set
# CONFIG_NSH_DISABLE_CMP is not set
# CONFIG_NSH_DISABLE_CP is not set
# CONFIG_NSH_DISABLE_DD is not set
# CONFIG_NSH_DISABLE_DF is not set
# CONFIG_NSH_DISABLE_DIRNAME is not set
# CONFIG_NSH_DISABLE_ECHO is not set
# CONFIG_NSH_DISABLE_ERROR_PRINT is not set
# CONFIG_NSH_DISABLE_EXPORT is not set
# CONFIG_NSH_DISABLE_FDINFO is not set
# CONFIG_NSH_DISABLE_FREE is not set
# CONFIG_NSH_DISABLE_HELP is not set
# CONFIG_NSH_DISABLE_HEXDUMP is not set
# CONFIG_NSH_DISABLE_KILL is not set
# CONFIG_NSH_DISABLE_LOSMART is not set
# CONFIG_NSH_DISABLE_LS is not set
# CONFIG_NSH_DISABLE_MB is not set
# CONFIG_NSH_DISABLE_MKDIR is not set
# CONFIG_NSH_DISABLE_MKFATFS is not set
# CONFIG_NSH_DISABLE_MKRD is not set
# CONFIG_NSH_DISABLE_MOUNT is not set
# CONFIG_NSH_DISABLE_MV is not set
# CONFIG_NSH_DISABLE_PRINTF is not set
# CONFIG_NSH_DISABLE_PS is not set
# CONFIG_NSH_DISABLE_PSSTACKUSAGE is not set
# CONFIG_NSH_DISABLE_PWD is not set
# CONFIG_NSH_DISABLE_RM is not set
# CONFIG_NSH_DISABLE_RMDIR is not set
# CONFIG_NSH_DISABLE_SLEEP is not set
# CONFIG_NSH_DISABLE_TIME is not set
# CONFIG_NSH_DISABLE_UMOUNT is not set
# CONFIG_NSH_DISABLE_UNAME is not set
# CONFIG_NSH_DISABLE_UPTIME is not set
# CONFIG_NSH_DISABLE_USLEEP is not set
# CONFIG_STANDARD_SERIAL is not set
CONFIG_ARCH="risc-v"
CONFIG_ARCH_BOARD="arty_a7"
@@ -28,6 +63,7 @@ CONFIG_ARCH_CHIP_LITEX=y
CONFIG_ARCH_INTERRUPTSTACK=8192
CONFIG_ARCH_RISCV=y
CONFIG_ARCH_STACKDUMP=y
CONFIG_BLOCK_CACHE=y
CONFIG_BOARD_LOOPSPERMSEC=10000
CONFIG_BUILTIN=y
CONFIG_DEBUG_FULLOPT=y
@@ -36,8 +72,6 @@ CONFIG_DEFAULT_SMALL=y
CONFIG_DEV_ZERO=y
CONFIG_EXAMPLES_HELLO=y
CONFIG_EXAMPLES_HELLO_STACKSIZE=8192
CONFIG_FAT_DMAMEMORY=y
CONFIG_FAT_FORCE_INDIRECT=y
CONFIG_FAT_LFN=y
CONFIG_FS_FAT=y
CONFIG_FS_FATTIME=y
@@ -50,6 +84,9 @@ CONFIG_INTELHEX_BINARY=y
CONFIG_LIBC_PERROR_STDOUT=y
CONFIG_LIBC_STRERROR=y
CONFIG_LITEX_SDIO=y
CONFIG_LITEX_SDIO_CACHE_COUNT=16
CONFIG_LITEX_SDIO_CACHE_WIDTH=64
CONFIG_LITEX_SDIO_USE_CACHE=y
CONFIG_NFILE_DESCRIPTORS_PER_BLOCK=6
CONFIG_NSH_ARCHINIT=y
CONFIG_NSH_BUILTIN_APPS=y

View file

@@ -76,7 +76,6 @@ int litex_bringup(void)
if (ret != OK)
{
syslog(LOG_ERR, "litex_sdio_initialize() failed %d\n", ret);
return ret;
}
/* If automount not configured, force a mount point.

View file

@@ -27,6 +27,7 @@
#include <assert.h>
#include <nuttx/board.h>
#include <nuttx/arch.h>
#include <sys/mount.h>
#ifdef CONFIG_BOARDCTL_RESET
@@ -60,6 +61,13 @@
int board_reset(int status)
{
#if defined(CONFIG_LITEX_SDIO_USE_CACHE)
/* Unmounting the filesystem forces flushing of any cached blocks */
nx_umount2(CONFIG_LITEX_SDIO_MOUNT_MOUNTPOINT, MNT_FORCE);
#endif
switch (status)
{
case CONFIG_BOARD_ASSERT_RESET_VALUE:

View file

@@ -29,6 +29,7 @@
#include <debug.h>
#include <errno.h>
#include <nuttx/drivers/block_cache.h>
#include <nuttx/sdio.h>
#include <nuttx/mmcsd.h>
@@ -125,7 +126,22 @@ int litex_sdio_initialize(void)
sdio_mediachange(sdio_dev, litex_sdio_get_card_detect());
return OK;
#if defined(CONFIG_LITEX_SDIO_USE_CACHE)
ret = block_cache_initialize(
"/dev/mmcsd0",
CONFIG_LITEX_SDIO_MOUNT_BLKDEV,
CONFIG_LITEX_SDIO_CACHE_WIDTH, /* cache width */
CONFIG_LITEX_SDIO_CACHE_COUNT, /* cache sections */
CONFIG_LITEX_SDIO_CACHE_MULTIPLIER /* block multiple. */
);
if (ret != OK)
{
ferr("ERROR: Failed to bind the buffered MMC/SD driver: %d\n", ret);
}
#endif
return ret;
}
#endif /* HAVE_SDMMC */

View file

@@ -52,6 +52,10 @@ elseif(CONFIG_DRVR_READAHEAD)
list(APPEND SRCS rwbuffer.c)
endif()
if(CONFIG_BLOCK_CACHE)
list(APPEND SRCS block_cache.c)
endif()
if(CONFIG_DEV_RPMSG)
list(APPEND SRCS rpmsgdev.c)
endif()

View file

@@ -106,6 +106,20 @@ bool "Lightweight Link Console Support"
menu "Buffering"
config BLOCK_CACHE
bool "Enable block cache wrapper"
default n
---help---
A block driver which can encapsulate another block device to
provide read-ahead and write buffering on multiple block ranges.
if BLOCK_CACHE
config BLOCK_CACHE_DEBUG
bool "Debug block cache buffers"
default n
endif
config DRVR_WRITEBUFFER
bool "Enable write buffer support"
default n

View file

@@ -55,6 +55,10 @@ else ifeq ($(CONFIG_DRVR_READAHEAD),y)
CSRCS += rwbuffer.c
endif
ifeq ($(CONFIG_BLOCK_CACHE),y)
CSRCS += block_cache.c
endif
ifeq ($(CONFIG_DEV_RPMSG),y)
CSRCS += rpmsgdev.c
endif

966
drivers/misc/block_cache.c Normal file
View file

@@ -0,0 +1,966 @@
/****************************************************************************
* drivers/misc/block_cache.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/param.h>
#include <sys/types.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <sys/mount.h>
#include <nuttx/kmalloc.h>
#include <nuttx/fs/fs.h>
#include <nuttx/fs/ioctl.h>
#include <nuttx/mutex.h>
#include <nuttx/drivers/block_cache.h>
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
#if defined(CONFIG_BLOCK_CACHE_DEBUG)
# define bcinfo _info
#else
# define bcinfo _none
#endif
#if defined(CONFIG_FS_LARGEFILE)
#define PRIBLKC PRIu64
#define PRIOFFS PRId64
#else
#define PRIBLKC PRIu32
#define PRIOFFS PRId32
#endif
/****************************************************************************
* Private Types
****************************************************************************/
enum block_flags
{
BLOCK_FREE = 0x1, /* The window is marked as free */
BLOCK_DIRTY = 0x2, /* The window contains dirty blocks */
};
struct cache_window_s
{
blkcnt_t start; /* The initial block index */
enum block_flags flags; /* Any flags for this window */
uint8_t * buffer; /* The underlying block buffer */
};
struct block_cache_dev_s
{
FAR struct inode * inode; /* Contained block device */
struct geometry geo; /* Contained device geometry */
size_t geo_multiple; /* External block multiple */
struct cache_window_s * cache; /* Array of block caches */
off_t last_index; /* The last index evicted */
uint16_t refs; /* Number of references */
size_t cache_width; /* The width of a cache */
size_t cache_count; /* The number of caches */
size_t cache_used_count; /* Number of used caches */
size_t aligned_mask; /* The mask for block alignment */
mutex_t lock; /* Lock */
};
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
/* Block device handles */
static int block_cache_open(FAR struct inode *inode);
static int block_cache_close(FAR struct inode *inode);
static ssize_t block_cache_read(FAR struct inode *inode,
FAR unsigned char *buffer,
blkcnt_t start_sector,
unsigned int nsectors);
static ssize_t block_cache_write(FAR struct inode *inode,
FAR const unsigned char *buffer, blkcnt_t start_block,
unsigned int nblocks);
static int block_cache_geometry(FAR struct inode *inode,
FAR struct geometry *geometry);
static int block_cache_ioctl(FAR struct inode *inode, int cmd,
unsigned long arg);
/* Interface to the underlying block device */
static ssize_t block_cache_reload(struct block_cache_dev_s * dev,
FAR uint8_t *buffer,
off_t startblock, size_t nblocks);
static ssize_t block_cache_sync(struct block_cache_dev_s * dev,
FAR const uint8_t *buffer,
off_t startblock, size_t nblocks);
/* Internal use */
int block_cache_write_internal(struct block_cache_dev_s * dev,
const unsigned char * buffer,
blkcnt_t startblock, unsigned int nblocks);
int block_cache_read_internal(struct block_cache_dev_s * dev,
unsigned char * buffer,
blkcnt_t startblock, unsigned int nblocks);
int block_cache_get_next(struct block_cache_dev_s * dev,
blkcnt_t aligned_start);
int block_cache_prime(struct block_cache_dev_s * dev,
blkcnt_t aligned_start);
int block_cache_evict(struct block_cache_dev_s * dev);
int block_cache_get_free(struct block_cache_dev_s * dev);
int block_cache_get_primed(struct block_cache_dev_s * dev,
blkcnt_t aligned_start);
int block_cache_flush(struct block_cache_dev_s * dev,
size_t cache_index);
void block_cache_deinit(struct block_cache_dev_s * dev);
int block_cache_init(struct block_cache_dev_s * dev);
/****************************************************************************
* Private Data
****************************************************************************/
static const struct block_operations g_bops =
{
block_cache_open, /* open */
block_cache_close, /* close */
block_cache_read, /* read */
block_cache_write, /* write */
block_cache_geometry, /* geometry */
block_cache_ioctl /* ioctl */
#ifndef CONFIG_DISABLE_PSEUDOFS_OPERATIONS
, NULL /* unlink */
#endif
};
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: block_cache_init
*
* Description:
* Initialize the array of windows used for caching blocks.
*
****************************************************************************/
int block_cache_init(struct block_cache_dev_s * dev)
{
dev->cache = (struct cache_window_s *)
kmm_zalloc(dev->cache_count * sizeof(struct cache_window_s));
if (!dev->cache)
{
return -ENOMEM;
}
for (int i = 0; i < dev->cache_count; i++)
{
dev->cache[i].start = -1;
dev->cache[i].flags = BLOCK_FREE;
dev->cache[i].buffer = (uint8_t *)kmm_zalloc(
dev->geo.geo_sectorsize * dev->cache_width);
/* Unwind any existing allocated buffers */
if (!dev->cache[i].buffer)
{
for (int j = (i - 1); j >= 0; j--)
{
kmm_free(dev->cache[j].buffer);
}
kmm_free(dev->cache);
return -ENOMEM;
}
}
return OK;
}
/****************************************************************************
* Name: block_cache_deinit
*
* Description:
* Deinitialize the array of cache windows used.
* Any blocks not flushed are lost.
*
****************************************************************************/
void block_cache_deinit(struct block_cache_dev_s * dev)
{
for (int i = 0; i < dev->cache_count; i++)
{
if (dev->cache[i].buffer)
{
kmm_free(dev->cache[i].buffer);
}
}
kmm_free(dev->cache);
}
/****************************************************************************
* Name: block_cache_flush
*
* Description:
* Flush the given cache index to the device if the cache contains 'dirty'
* blocks which do not match the contents of the underlying device.
*
****************************************************************************/
int block_cache_flush(struct block_cache_dev_s * dev, size_t cache_index)
{
int ret = OK;
if (dev->cache[cache_index].flags & BLOCK_DIRTY)
{
bcinfo("flush: %" PRIuPTR ", origin %" PRIBLKC "\n", cache_index,
dev->cache[cache_index].start);
ret = block_cache_sync(
dev,
dev->cache[cache_index].buffer,
dev->cache[cache_index].start,
dev->cache_width);
if (ret > 0)
{
dev->cache[cache_index].flags &= ~BLOCK_DIRTY;
}
}
else
{
bcinfo("skip flush: %" PRIuPTR ", origin %" PRIBLKC "\n", cache_index,
dev->cache[cache_index].start);
}
return ret;
}
/****************************************************************************
* Name: block_cache_get_primed
*
* Description:
* Search all cache windows for a block which matches the aligned_start.
* Return the index if found, or -1 if no window matches.
*
****************************************************************************/
int block_cache_get_primed(struct block_cache_dev_s * dev,
blkcnt_t aligned_start)
{
int cache_index = -1;
for (size_t i = 0; i < dev->cache_count; i++)
{
if ((dev->cache[i].start == aligned_start))
{
cache_index = i;
bcinfo("found: %" PRIuPTR ", origin: %" PRIBLKC "\n",
i, aligned_start);
break;
}
}
return cache_index;
}
/****************************************************************************
* Name: block_cache_get_free
*
* Description:
* Find any cache index which is currently not used.
*
****************************************************************************/
int block_cache_get_free(struct block_cache_dev_s * dev)
{
int cache_index = -1;
if (dev->cache_used_count == dev->cache_count)
{
return cache_index;
}
for (int i = 0; i < dev->cache_count; i++)
{
if (dev->cache[i].flags & BLOCK_FREE)
{
bcinfo("empty: %" PRIdPTR "\n", i);
dev->cache_used_count++;
cache_index = i;
break;
}
}
return cache_index;
}
/****************************************************************************
* Name: block_cache_evict
*
* Description:
* Determine and evict a cache window in preparation for use with new
* blocks.
*
****************************************************************************/
int block_cache_evict(struct block_cache_dev_s * dev)
{
int ret;
int cache_index;
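/* Window selection is a simple round robin: windows are flushed and
* reused in index order, wrapping around after the last window. */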
cache_index = dev->last_index;
bcinfo("evict: %" PRIdPTR ", origin: %" PRIBLKC "\n", cache_index,
dev->cache[cache_index].start);
ret = block_cache_flush(dev, cache_index);
if (ret >= 0)
{
ret = cache_index;
dev->last_index = (dev->last_index + 1) % dev->cache_count;
}
return ret;
}
/****************************************************************************
* Name: block_cache_prime
*
* Description:
* Reload a cache window with blocks from the encapsulated device.
* This either:
* - Finds an empty unused window.
* - Evicts an existing window and reloads it with the start block from
* the encapsulated device.
*
****************************************************************************/
int block_cache_prime(struct block_cache_dev_s * dev, blkcnt_t aligned_start)
{
int cache_index = -1;
int ret;
cache_index = block_cache_get_free(dev);
if (cache_index < 0)
{
cache_index = block_cache_evict(dev);
}
if (cache_index >= 0)
{
bcinfo("prime: %" PRIdPTR ", origin %" PRIBLKC,
cache_index, aligned_start);
ret = block_cache_reload(dev,
dev->cache[cache_index].buffer,
aligned_start, dev->cache_width);
if (ret > 0)
{
dev->cache[cache_index].start = aligned_start;
dev->cache[cache_index].flags &= ~BLOCK_FREE;
}
}
return cache_index;
}
/****************************************************************************
* Name: block_cache_get_next
*
* Description:
* Get the next available cache index to use for a block aligned to the
* start of a window. This either:
* - Finds an already available index which starts with aligned_start.
* - Finds an empty unused window.
* - Evicts an existing window and reloads it with the start block from
* the encapsulated device.
*
****************************************************************************/
int block_cache_get_next(struct block_cache_dev_s * dev,
blkcnt_t aligned_start)
{
int cache_number;
cache_number = block_cache_get_primed(dev, aligned_start);
if (cache_number < 0)
{
cache_number = block_cache_prime(dev, aligned_start);
}
#if defined(CONFIG_DEBUG_ASSERTIONS) && defined(CONFIG_BLOCK_CACHE_DEBUG)
if (cache_number >= 0)
{
/* Check to see if any caches have the same start block number */
for (int i = cache_number + 1; i < dev->cache_count; i++)
{
DEBUGASSERT(dev->cache[cache_number].start != dev->cache[i].start);
}
}
#endif
return cache_number;
}
/****************************************************************************
* Name: block_cache_read_internal
*
* Description:
* Internal cached block read. Performs the following:
* - Read from an existing window if the start block is already cached.
* - Prime a new window and read from it if the start block is not cached.
* - Repeat as necessary to satisfy the entire request.
*
* Returned Value:
* The number of blocks read, or a negative value on error.
*
****************************************************************************/
int block_cache_read_internal(struct block_cache_dev_s * dev,
unsigned char * buffer,
blkcnt_t startblock, unsigned int nblocks)
{
blkcnt_t aligned_start;
unsigned int remaining;
unsigned int read;
off_t read_count;
off_t blocks_remaining;
int cache_number;
blkcnt_t offset;
bcinfo("startblock: %" PRIBLKC ", nblocks: %" PRIuPTR "\n",
startblock, nblocks);
/* Calculate the aligned start block and the offset within that window */
aligned_start = startblock & ~(dev->aligned_mask);
offset = startblock & dev->aligned_mask;
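/* Example (illustrative): with cache_width = 64 the mask is 0x3f, so
* startblock 130 yields aligned_start = 128 and offset = 2. */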
remaining = nblocks;
read = 0;
/** Iterate until all blocks are read */
while (remaining > 0)
{
/** Get the cache window index for the aligned_start block */
cache_number = block_cache_get_next(dev, aligned_start);
/** Check for errors in cache retrieval */
if (cache_number < 0)
{
return cache_number;
}
/** Calculate the number of blocks that can be read in this operation */
blocks_remaining = dev->cache_width - offset;
read_count = remaining > blocks_remaining ?
blocks_remaining : remaining;
bcinfo("use %" PRIOFFS "blocks from offset %" PRIBLKC "\n",
read_count, offset);
/** Copy data from the cache window to the buffer */
memcpy(
&buffer[read * dev->geo.geo_sectorsize],
&dev->cache[cache_number].buffer[offset * dev->geo.geo_sectorsize],
read_count * dev->geo.geo_sectorsize
);
/** Update remaining block counts and offsets */
remaining -= read_count;
read += read_count;
aligned_start += dev->cache_width;
offset = 0;
}
DEBUGASSERT(read == nblocks);
return read;
}
/****************************************************************************
* Name: block_cache_write_internal
*
* Description:
* Internal cached block functionality. Performs the following:
* - Write to an existing window if the start block is already cached.
* - Write to a new window if the start block is not already cached.
* - Repeat as necessary to cache the entire buffer.
*
* Returned Value:
* The number of blocks written, or a negative value on error.
*
****************************************************************************/
int block_cache_write_internal(struct block_cache_dev_s * dev,
const unsigned char * buffer,
blkcnt_t startblock, unsigned int nblocks)
{
blkcnt_t aligned_start;
unsigned int remaining;
unsigned int written;
off_t write_count;
off_t blocks_remaining;
int cache_number;
blkcnt_t offset;
bcinfo("startblock: %" PRIBLKC ", nblocks: %" PRIuPTR "\n",
startblock, nblocks);
/** Calculate the aligned start block and the offset within that window */
aligned_start = startblock & ~(dev->aligned_mask);
offset = startblock & dev->aligned_mask;
remaining = nblocks;
written = 0;
/** Iterate until all blocks are written */
while (remaining > 0)
{
/** Get the cache window index for the aligned_start block */
cache_number = block_cache_get_next(dev, aligned_start);
/** Check for errors in cache retrieval */
if (cache_number < 0)
{
return cache_number;
}
/** Number of blocks that can be written in this operation */
blocks_remaining = dev->cache_width - offset;
write_count = remaining > blocks_remaining ?
blocks_remaining : remaining;
bcinfo("use %" PRIOFFS " blocks from offset %" PRIBLKC "\n",
write_count, offset);
/** Copy data from the buffer to the cache window */
memcpy(
&dev->cache[cache_number].buffer[offset * dev->geo.geo_sectorsize],
&buffer[written * dev->geo.geo_sectorsize],
write_count * dev->geo.geo_sectorsize
);
/** Mark the cache window as dirty to indicate changes */
dev->cache[cache_number].flags |= BLOCK_DIRTY;
/** Update remaining block counts and offsets */
remaining -= write_count;
written += write_count;
aligned_start += dev->cache_width;
offset = 0;
}
DEBUGASSERT(written == nblocks);
return written;
}
/****************************************************************************
* Name: block_cache_open
*
* Description:
* Exposed block open interface. Just increments reference count.
*
****************************************************************************/
static int block_cache_open(FAR struct inode * inode)
{
int ret;
FAR struct block_cache_dev_s * dev;
DEBUGASSERT(inode && inode->i_private);
dev = (FAR struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
dev->refs++;
nxmutex_unlock(&dev->lock);
return OK;
}
/****************************************************************************
* Name: block_cache_close
*
* Description:
* Exposed block close interface. Decrement reference count and flush the
* cache when it reaches zero.
*
****************************************************************************/
static int block_cache_close(FAR struct inode * inode)
{
int ret;
FAR struct block_cache_dev_s * dev;
DEBUGASSERT(inode && inode->i_private);
dev = (FAR struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
if (--dev->refs == 0)
{
for (int i = 0; i < dev->cache_count; i++)
{
ret = block_cache_flush(dev, i);
DEBUGASSERT(ret >= 0);
if (ret < 0)
{
break;
}
}
}
nxmutex_unlock(&dev->lock);
return ret < 0 ? ret : OK;
}
/****************************************************************************
* Name: block_cache_reload
*
* Description:
* Read the specified number of sectors from the encapsulated
* block device.
*
****************************************************************************/
static ssize_t block_cache_reload(struct block_cache_dev_s * dev,
FAR uint8_t *buffer, off_t startblock, size_t nblocks)
{
return dev->inode->u.i_bops->read(dev->inode, buffer, startblock, nblocks);
}
/****************************************************************************
* Name: block_cache_read
*
* Description:
* Read the specified number of sectors, utilizing the read-ahead buffer
* where possible.
*
****************************************************************************/
static ssize_t block_cache_read(FAR struct inode * inode,
unsigned char * buffer, blkcnt_t startblock,
unsigned int nblocks)
{
int ret;
FAR struct block_cache_dev_s * dev;
DEBUGASSERT(inode && inode->i_private);
dev = (FAR struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
ret = block_cache_read_internal(
dev,
buffer,
startblock * dev->geo_multiple,
nblocks * dev->geo_multiple
);
if (ret > 0)
{
ret /= dev->geo_multiple;
}
nxmutex_unlock(&dev->lock);
return ret;
}
/****************************************************************************
* Name: block_cache_sync
*
* Description:
* Sync the specified number of sectors to the encapsulated
* block device.
*
****************************************************************************/
static ssize_t block_cache_sync(struct block_cache_dev_s * dev,
FAR const uint8_t *buffer,
off_t startblock, size_t nblocks)
{
return dev->inode->u.i_bops->write(dev->inode, buffer,
startblock, nblocks);
}
/****************************************************************************
* Name: block_cache_write
*
* Description:
* Exposed block write interface. Attempt to cache the incoming buffer
* to an existing or available cache window. Perform any synchronisation
* needed.
*
****************************************************************************/
static ssize_t block_cache_write(FAR struct inode *inode,
FAR const unsigned char *buffer,
blkcnt_t startblock, unsigned int nblocks)
{
int ret;
struct block_cache_dev_s *dev;
DEBUGASSERT(inode && inode->i_private);
dev = (struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
ret = block_cache_write_internal(
dev,
buffer,
startblock * dev->geo_multiple,
nblocks * dev->geo_multiple
);
if (ret > 0)
{
ret /= dev->geo_multiple;
}
nxmutex_unlock(&dev->lock);
return ret;
}
/****************************************************************************
* Name: block_cache_geometry
*
* Description:
* Retrieve the device geometry multiplied by the scaling factor between
* the encapsulated and exposed block device.
*
****************************************************************************/
static int block_cache_geometry(FAR struct inode *inode,
FAR struct geometry *geometry)
{
int ret;
FAR struct block_cache_dev_s *dev;
DEBUGASSERT(inode && geometry && inode->i_private);
dev = (struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
ret = dev->inode->u.i_bops->geometry(dev->inode, &dev->geo);
if (ret >= 0)
{
geometry->geo_available = dev->geo.geo_available;
geometry->geo_mediachanged = dev->geo.geo_mediachanged;
geometry->geo_writeenabled = dev->geo.geo_writeenabled;
geometry->geo_nsectors = dev->geo.geo_nsectors
/ dev->geo_multiple;
geometry->geo_sectorsize = dev->geo.geo_sectorsize
* dev->geo_multiple;
}
nxmutex_unlock(&dev->lock);
return ret;
}
/****************************************************************************
* Name: block_cache_ioctl
*
* Description:
* Exposed ioctl interface. All ioctl commands are forwarded to the
* encapsulated device. BIOC_FLUSH additionally flushes every cache
* window before being forwarded.
*
****************************************************************************/
static int block_cache_ioctl(FAR struct inode *inode, int cmd,
unsigned long arg)
{
FAR struct block_cache_dev_s *dev;
int ret;
DEBUGASSERT(inode && inode->i_private);
dev = (struct block_cache_dev_s *)inode->i_private;
ret = nxmutex_lock(&dev->lock);
if (ret < 0)
{
return ret;
}
if (cmd == BIOC_FLUSH)
{
for (int i = 0; i < dev->cache_count; i++)
{
ret = block_cache_flush(dev, i);
if (ret < 0)
{
nxmutex_unlock(&dev->lock);
return ret;
}
}
}
ret = dev->inode->u.i_bops->ioctl(dev->inode, cmd, arg);
nxmutex_unlock(&dev->lock);
return ret;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: block_cache_initialize
*
* Description:
* Initialize to provide a cached block device wrapper around an existing
* block device
*
* Input Parameters:
* source: The source block driver to encapsulate
* destination: The path to the exposed block driver
* cache_width: The number of blocks in a single cache window.
* cache_count: The number of cache windows
* geo_multiplier: A multiplier applied between the encapsulated and
* exposed block devices.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int block_cache_initialize(FAR const char *source,
FAR const char *destination,
const size_t cache_width,
const size_t cache_count,
const size_t geo_multiplier)
{
int ret;
struct inode *inode;
struct block_cache_dev_s *dev;
/* Minimal sanity checks */
DEBUGASSERT(source && destination);
/* The cache width should be a power of two */
DEBUGASSERT(popcount(cache_width) == 1);
/* The multiplier is applied to the geometry, zero doesn't make sense */
DEBUGASSERT(geo_multiplier > 0);
finfo("bind \"%s\", to \"%s\"\n", source, destination);
ret = open_blockdriver(source, 0666, &inode);
if (ret < 0)
{
return ret;
}
/* Allocate a block buffer device structure */
dev = (FAR struct block_cache_dev_s *)
kmm_zalloc(sizeof(struct block_cache_dev_s));
if (dev)
{
/* Initialize the block buffer device structure */
nxmutex_init(&dev->lock);
dev->inode = inode;
ret = dev->inode->u.i_bops->geometry(dev->inode, &dev->geo);
if (ret < 0)
{
ferr("ERROR: node geometry failed: %" PRIdPTR "\n", ret);
kmm_free(dev);
return ret;
}
dev->cache_width = cache_width;
dev->cache_count = cache_count;
dev->geo_multiple = geo_multiplier;
dev->cache_used_count = 0;
dev->last_index = 0;
dev->aligned_mask = ((dev->cache_width) - 1);
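/* cache_width was asserted to be a power of two above, so this mask
* selects the block offset within a window (e.g. width 64 -> 0x3f). */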
ret = block_cache_init(dev);
if (ret < 0)
{
kmm_free(dev);
close_blockdriver(inode);
return ret;
}
ret = register_blockdriver(destination, &g_bops, 0666, dev);
if (ret < 0)
{
ferr("ERROR: register_blockdriver failed: %" PRIdPTR "\n", -ret);
block_cache_deinit(dev);
kmm_free(dev);
}
}
else
{
/* Allocation failed; release the source device and report the error */
ret = -ENOMEM;
close_blockdriver(inode);
}
return ret;
}

View file

@@ -0,0 +1,141 @@
/****************************************************************************
* include/nuttx/drivers/block_cache.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __INCLUDE_NUTTX_DRIVERS_BLOCK_CACHE_H
#define __INCLUDE_NUTTX_DRIVERS_BLOCK_CACHE_H
/****************************************************************************
* Provides a block device driver which can wrap an existing block device.
* The new block device provides cached access to the underlying device.
*
* Caching Strategy Summary:
*
* The block_cache_read_internal and block_cache_write_internal functions
* implement a caching strategy that optimizes read and write operations to
* a block device. The strategy utilizes a cache consisting of multiple
* cache windows, each of which can store a portion of the device's blocks.
*
* Key Points:
*
* 1. Cache Windows:
* - The cache is divided into cache windows, each of size 'cache_width'
* blocks.
* - Each cache window holds a subset of the device's blocks in memory.
*
* 2. Block Alignment:
* - The caching is aligned to block boundaries to minimize cache misses.
*
* 3. Read Operation (block_cache_read_internal):
* - Reads data from the cache when available, minimizing access to the
* underlying device. Performs read ahead operation up to the size of
* the cache window.
* - Handles partial reads, synchronizing data between the cache and the
* device.
*
* 4. Write Operation (block_cache_write_internal):
* - Writes data to the cache, marking blocks as dirty.
* - Synchronizes dirty blocks with the underlying device when necessary.
*
* 5. Cache Management:
* - Cache windows are managed to ensure they contain the most relevant
* blocks.
* - Cache hits result in faster access times, while cache misses trigger
* cache loading and eviction.
*
* 6. Multiplier:
* - A 'geo_multiplier' factor is applied to the device geometry, allowing
* the cache to work with a multiple of the device's block size. This
* can allow the encapsulated device to be exposed with a larger block
* size.
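*
* For example (illustrative figures only): wrapping a device that has
* 512 byte sectors with geo_multiplier = 8 exposes a device with
* 4096 byte sectors and one eighth as many of them; each exposed block
* maps to eight underlying blocks inside the cache.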
*
* This caching strategy enhances performance by reducing the number of
* read/write operations to the underlying block device and minimizing
* redundant data transfers.
*
* Warning:
* The cache is only flushed to the underlying device under the following
* conditions:
* - Close is called on the device handle, and it is the final remaining
* open reference.
* - The BIOC_FLUSH IOCTL command is used. This is typically issued by the
* filesystem when needed, or requested by the user through fsync or
* a similar mechanism.
*
* If this driver is used with removable media and the device is removed
* without the cache being flushed, the data is lost!
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#ifndef __ASSEMBLY__
#ifdef __cplusplus
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: block_cache_initialize
*
* Description:
* Initialize to provide a cached block device wrapper around an existing
* block device
*
* Input Parameters:
* source: The source block driver to encapsulate
* destination: The path to the exposed block driver
* cache_width: The number of blocks in a single cache window.
* cache_count: The number of cache windows
* geo_multiplier: A multiplier applied between the encapsulated and
* exposed block devices.
*
* Returned Value:
* Zero on success; a negated errno value on failure.
*
****************************************************************************/
int block_cache_initialize(FAR const char *source,
FAR const char *destination,
const size_t cache_width,
const size_t cache_count,
const size_t geo_multiplier);
#undef EXTERN
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* __INCLUDE_NUTTX_DRIVERS_BLOCK_CACHE_H */