Changeset 2eb89ad in rtems for cpukit/libblock


Ignore:
Timestamp:
Aug 2, 2008, 6:23:45 AM (12 years ago)
Author:
Chris Johns <chrisj@…>
Branches:
4.10, 4.11, 4.9, master
Children:
f031251c
Parents:
799ef3b
Message:

2008-08-02 Chris Johns <chrisj@…>

  • libblock/include/rtems/blkdev.h: Remove count and start from rtems_blkdev_request. Add RTEMS_BLKDEV_START_BLOCK macro.
  • libblock/src/bdbuf.c: Add read ahead blocks always consecutive comment. Change count to bufnum and remove start references. Sort the transfer list so blocks are consecutive where possible.
  • libblock/src/blkdev.c, libblock/src/nvdisk.c, libblock/src/ramdisk.c: Change count to bufnum and remove start references.
Location:
cpukit/libblock
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • cpukit/libblock/include/rtems/blkdev.h

    r799ef3b r2eb89ad  
    8888    /* If status != RTEMS_SUCCESSFUL, this field contains error code */
    8989    int error;
    90     /* Start block number */
    91     rtems_blkdev_bnum start;
    92     /* Number of blocks to be exchanged */
    93     uint32_t count;
    94     /* Number of buffers provided */
     90    /* Number of blocks for this request. */
    9591    uint32_t bufnum;
    9692
     
    10197    rtems_blkdev_sg_buffer bufs[0];
    10298} rtems_blkdev_request;
     99
     100/* The start block in a request. Only valid if the driver has returned the
     101 * RTEMS_BLKDEV_CAPABILITIES of RTEMS_BLKDEV_CAP_MULTISECTOR_CONT */
     102#define RTEMS_BLKDEV_START_BLOCK(_r) (_r->bufs[0].block)
    103103
    104104/* Block device IOCTL request codes */
  • cpukit/libblock/src/bdbuf.c

    r799ef3b r2eb89ad  
    15081508 * already with a user when this call is made the call is blocked until the
    15091509 * buffer is returned. The highest priority waiter will obtain the buffer
    1510  * first.
     1510 * first.
     1511 *
     1512 * @note Read ahead always reads buffers in sequence. All multi-block reads
     1513 *       read consecutive blocks.
    15111514 *
    15121515 * @param device Device number (constructed of major and minor device number)
     
    15561559  }
    15571560
    1558   req->count = 0;
    15591561  req->bufnum = 0;
    15601562
     
    15751577  rtems_bdbuf_lock_pool (pool);
    15761578
    1577   while (req->count < read_ahead_count)
     1579  while (req->bufnum < read_ahead_count)
    15781580  {
    15791581    /*
     
    15861588     */
    15871589    bd = rtems_bdbuf_get_buffer (dd->phys_dev, pool,
    1588                                  block + req->count,
    1589                                  req->count == 0 ? FALSE : TRUE);
     1590                                 block + req->bufnum,
     1591                                 req->bufnum == 0 ? FALSE : TRUE);
    15901592
    15911593    /*
     
    16141616     *       node that can be used.
    16151617     */
    1616     req->bufs[req->count].user   = bd;
    1617     req->bufs[req->count].block  = bd->block;
    1618     req->bufs[req->count].length = dd->block_size;
    1619     req->bufs[req->count].buffer = bd->buffer;
    1620     req->count++;
     1618    req->bufs[req->bufnum].user   = bd;
     1619    req->bufs[req->bufnum].block  = bd->block;
     1620    req->bufs[req->bufnum].length = dd->block_size;
     1621    req->bufs[req->bufnum].buffer = bd->buffer;
    16211622    req->bufnum++;
    16221623  }
     
    16261627   * the block in the cache so return it.
    16271628   */
    1628   if (req->count)
     1629  if (req->bufnum)
    16291630  {
    16301631    /*
     
    16501651    req->status = RTEMS_RESOURCE_IN_USE;
    16511652    req->error = 0;
    1652     req->start = dd->start;
    16531653 
    16541654    result = dd->ioctl (dd->phys_dev->dev, RTEMS_BLKIO_REQUEST, req);
     
    16771677    rtems_bdbuf_lock_pool (pool);
    16781678
    1679     for (b = 1; b < req->count; b++)
     1679    for (b = 1; b < req->bufnum; b++)
    16801680    {
    16811681      bd = req->bufs[b].user;
     
    20492049        {
    20502050          rtems_chain_node* next_node = node->next;
     2051          rtems_chain_node* tnode = rtems_chain_tail (transfer);
     2052   
     2053          /*
     2054           * The blocks on the transfer list are sorted in block order. This
     2055           * means multi-block transfers for drivers that require consecutive
     2056           * blocks perform better with sorted blocks and for real disks it may
     2057           * help lower head movement.
     2058           */
     2059
     2060          bd->state = RTEMS_BDBUF_STATE_TRANSFER;
     2061
    20512062          rtems_chain_extract (node);
    2052           rtems_chain_append (transfer, node);
     2063
     2064          tnode = tnode->previous;
     2065         
     2066          while (node && !rtems_chain_is_head (transfer, tnode))
     2067          {
     2068            rtems_bdbuf_buffer* tbd = (rtems_bdbuf_buffer*) tnode;
     2069
     2070            if (bd->block > tbd->block)
     2071            {
     2072              rtems_chain_insert (tnode, node);
     2073              node = NULL;
     2074            }
     2075            else
     2076              tnode = tnode->previous;
     2077          }
     2078
     2079          if (node)
     2080            rtems_chain_prepend (transfer, node);
     2081         
    20532082          node = next_node;
    2054           bd->state = RTEMS_BDBUF_STATE_TRANSFER;
    20552083        }
    20562084        else
     
    21622190
    21632191      write_req->status = RTEMS_RESOURCE_IN_USE;
    2164       write_req->start = dd->start;
    21652192      write_req->error = 0;
    2166       write_req->count = 0;
    21672193      write_req->bufnum = 0;
    21682194
     
    21822208       
    21832209        if ((dd->capabilities & RTEMS_BLKDEV_CAP_MULTISECTOR_CONT) &&
    2184             write_req->count &&
     2210            write_req->bufnum &&
    21852211            (bd->block != (last_block + 1)))
    21862212        {
     
    21902216        else
    21912217        {
    2192           write_req->bufs[write_req->count].user   = bd;
    2193           write_req->bufs[write_req->count].block  = bd->block;
    2194           write_req->bufs[write_req->count].length = dd->block_size;
    2195           write_req->bufs[write_req->count].buffer = bd->buffer;
    2196           write_req->count++;
     2218          write_req->bufs[write_req->bufnum].user   = bd;
     2219          write_req->bufs[write_req->bufnum].block  = bd->block;
     2220          write_req->bufs[write_req->bufnum].length = dd->block_size;
     2221          write_req->bufs[write_req->bufnum].buffer = bd->buffer;
    21972222          write_req->bufnum++;
    21982223          last_block = bd->block;
     
    22052230
    22062231        if (rtems_chain_is_empty (&transfer) ||
    2207             (write_req->count >= rtems_bdbuf_configuration.max_write_blocks))
     2232            (write_req->bufnum >= rtems_bdbuf_configuration.max_write_blocks))
    22082233          write = TRUE;
    22092234
     
    22242249            rtems_bdbuf_lock_pool (pool);
    22252250             
    2226             for (b = 0; b < write_req->count; b++)
     2251            for (b = 0; b < write_req->bufnum; b++)
    22272252            {
    22282253              bd = write_req->bufs[b].user;
     
    22532278            rtems_bdbuf_lock_pool (pool);
    22542279
    2255             for (b = 0; b < write_req->count; b++)
     2280            for (b = 0; b < write_req->bufnum; b++)
    22562281            {
    22572282              bd = write_req->bufs[b].user;
     
    22752300          write_req->status = RTEMS_RESOURCE_IN_USE;
    22762301          write_req->error = 0;
    2277           write_req->count = 0;
    22782302          write_req->bufnum = 0;
    22792303        }
  • cpukit/libblock/src/blkdev.c

    r799ef3b r2eb89ad  
    233233        {
    234234            rtems_blkdev_request *req = args->buffer;
    235             req->start += dd->start;
    236235            args->ioctl_return = dd->ioctl(dd->phys_dev->dev, args->command,
    237236                                           req);
  • cpukit/libblock/src/nvdisk.c

    r799ef3b r2eb89ad  
    589589#endif
    590590
    591   remains = req->count * nvd->block_size;
     591  remains = req->bufnum * nvd->block_size;
    592592 
    593593  for (b = 0; b < req->bufnum; b++, sg++)
  • cpukit/libblock/src/ramdisk.c

    r799ef3b r2eb89ad  
    100100#endif
    101101
    102     remains = rd->block_size * req->count;
     102    remains = rd->block_size * req->bufnum;
    103103    sg = req->bufs;
    104104    for (i = 0; (remains > 0) && (i < req->bufnum); i++, sg++)
     
    138138    rtems_ramdisk_printf (rd, "ramdisk write: start=%d, blocks=%d remains=%d",
    139139                          req->bufs[0].block, req->bufnum,
    140                           rd->block_size * req->count);
    141 #endif
    142     remains = rd->block_size * req->count;
     140                          rd->block_size * req->bufnum);
     141#endif
     142    remains = rd->block_size * req->bufnum;
    143143    sg = req->bufs;
    144144    for (i = 0; (remains > 0) && (i < req->bufnum); i++, sg++)
Note: See TracChangeset for help on using the changeset viewer.