@@ -8,7 +8,6 @@
 #include <linux/writeback.h>
 #include <linux/swap.h>
 #include <linux/migrate.h>
-#include "internal.h"
 #include "trace.h"
 
 #include "../internal.h"
@@ -1550,219 +1549,6 @@ void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
 }
 EXPORT_SYMBOL_GPL(iomap_finish_folio_write);
 
-/*
- * We're now finished for good with this ioend structure. Update the page
- * state, release holds on bios, and finally free up memory. Do not use the
- * ioend after this.
- */
-u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
-{
-        struct inode *inode = ioend->io_inode;
-        struct bio *bio = &ioend->io_bio;
-        struct folio_iter fi;
-        u32 folio_count = 0;
-
-        if (ioend->io_error) {
-                mapping_set_error(inode->i_mapping, ioend->io_error);
-                if (!bio_flagged(bio, BIO_QUIET)) {
-                        pr_err_ratelimited(
-"%s: writeback error on inode %lu, offset %lld, sector %llu",
-                                inode->i_sb->s_id, inode->i_ino,
-                                ioend->io_offset, ioend->io_sector);
-                }
-        }
-
-        /* walk all folios in bio, ending page IO on them */
-        bio_for_each_folio_all(fi, bio) {
-                iomap_finish_folio_write(inode, fi.folio, fi.length);
-                folio_count++;
-        }
-
-        bio_put(bio);        /* frees the ioend */
-        return folio_count;
-}
-
-static void ioend_writeback_end_bio(struct bio *bio)
-{
-        struct iomap_ioend *ioend = iomap_ioend_from_bio(bio);
-
-        ioend->io_error = blk_status_to_errno(bio->bi_status);
-        iomap_finish_ioend_buffered(ioend);
-}
-
-/*
- * We cannot cancel the ioend directly in case of an error, so call the bio end
- * I/O handler with the error status here to run the normal I/O completion
- * handler.
- */
-int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
-{
-        struct iomap_ioend *ioend = wpc->wb_ctx;
-
-        if (!ioend->io_bio.bi_end_io)
-                ioend->io_bio.bi_end_io = ioend_writeback_end_bio;
-
-        if (WARN_ON_ONCE(wpc->iomap.flags & IOMAP_F_ANON_WRITE))
-                error = -EIO;
-
-        if (error) {
-                ioend->io_bio.bi_status = errno_to_blk_status(error);
-                bio_endio(&ioend->io_bio);
-                return error;
-        }
-
-        submit_bio(&ioend->io_bio);
-        return 0;
-}
-EXPORT_SYMBOL_GPL(iomap_ioend_writeback_submit);
-
-static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
-                loff_t pos, u16 ioend_flags)
-{
-        struct bio *bio;
-
-        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
-                        REQ_OP_WRITE | wbc_to_write_flags(wpc->wbc),
-                        GFP_NOFS, &iomap_ioend_bioset);
-        bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
-        bio->bi_write_hint = wpc->inode->i_write_hint;
-        wbc_init_bio(wpc->wbc, bio);
-        wpc->nr_folios = 0;
-        return iomap_init_ioend(wpc->inode, bio, pos, ioend_flags);
-}
-
-static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
-                u16 ioend_flags)
-{
-        struct iomap_ioend *ioend = wpc->wb_ctx;
-
-        if (ioend_flags & IOMAP_IOEND_BOUNDARY)
-                return false;
-        if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
-            (ioend->io_flags & IOMAP_IOEND_NOMERGE_FLAGS))
-                return false;
-        if (pos != ioend->io_offset + ioend->io_size)
-                return false;
-        if (!(wpc->iomap.flags & IOMAP_F_ANON_WRITE) &&
-            iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio))
-                return false;
-        /*
-         * Limit ioend bio chain lengths to minimise IO completion latency. This
-         * also prevents long tight loops ending page writeback on all the
-         * folios in the ioend.
-         */
-        if (wpc->nr_folios >= IOEND_BATCH_SIZE)
-                return false;
-        return true;
-}
-
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first; otherwise finish off the current ioend and start another.
- *
- * If a new ioend is created and cached, the old ioend is submitted to the block
- * layer instantly. Batching optimisations are provided by higher level block
- * plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
-ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
-                loff_t pos, loff_t end_pos, unsigned int dirty_len)
-{
-        struct iomap_ioend *ioend = wpc->wb_ctx;
-        size_t poff = offset_in_folio(folio, pos);
-        unsigned int ioend_flags = 0;
-        unsigned int map_len = min_t(u64, dirty_len,
-                wpc->iomap.offset + wpc->iomap.length - pos);
-        int error;
-
-        trace_iomap_add_to_ioend(wpc->inode, pos, dirty_len, &wpc->iomap);
-
-        WARN_ON_ONCE(!folio->private && map_len < dirty_len);
-
-        switch (wpc->iomap.type) {
-        case IOMAP_INLINE:
-                WARN_ON_ONCE(1);
-                return -EIO;
-        case IOMAP_HOLE:
-                return map_len;
-        default:
-                break;
-        }
-
-        if (wpc->iomap.type == IOMAP_UNWRITTEN)
-                ioend_flags |= IOMAP_IOEND_UNWRITTEN;
-        if (wpc->iomap.flags & IOMAP_F_SHARED)
-                ioend_flags |= IOMAP_IOEND_SHARED;
-        if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
-                ioend_flags |= IOMAP_IOEND_BOUNDARY;
-
-        if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
-new_ioend:
-                if (ioend) {
-                        error = wpc->ops->writeback_submit(wpc, 0);
-                        if (error)
-                                return error;
-                }
-                wpc->wb_ctx = ioend = iomap_alloc_ioend(wpc, pos, ioend_flags);
-        }
-
-        if (!bio_add_folio(&ioend->io_bio, folio, map_len, poff))
-                goto new_ioend;
-
-        iomap_start_folio_write(wpc->inode, folio, map_len);
-
-        /*
-         * Clamp io_offset and io_size to the incore EOF so that ondisk
-         * file size updates in the ioend completion are byte-accurate.
-         * This avoids recovering files with zeroed tail regions when
-         * writeback races with appending writes:
-         *
-         * Thread 1:                  Thread 2:
-         * ------------               -----------
-         * write [A, A+B]
-         * update inode size to A+B
-         * submit I/O [A, A+BS]
-         *                            write [A+B, A+B+C]
-         *                            update inode size to A+B+C
-         * <I/O completes, updates disk size to min(A+B+C, A+BS)>
-         * <power failure>
-         *
-         * After reboot:
-         * 1) with A+B+C < A+BS, the file has zero padding in range
-         *    [A+B, A+B+C]
-         *
-         * |<     Block Size (BS)   >|
-         * |DDDDDDDDDDDD0000000000000|
-         * ^           ^        ^
-         * A          A+B     A+B+C
-         *                     (EOF)
-         *
-         * 2) with A+B+C > A+BS, the file has zero padding in range
-         *    [A+B, A+BS]
-         *
-         * |<     Block Size (BS)   >|<     Block Size (BS)    >|
-         * |DDDDDDDDDDDD0000000000000|00000000000000000000000000|
-         * ^           ^             ^           ^
-         * A          A+B           A+BS      A+B+C
-         *                                    (EOF)
-         *
-         *  D = Valid Data
-         *  0 = Zero Padding
-         *
-         * Note that this defeats the ability to chain the ioends of
-         * appending writes.
-         */
-        ioend->io_size += map_len;
-        if (ioend->io_offset + ioend->io_size > end_pos)
-                ioend->io_size = end_pos - ioend->io_offset;
-
-        wbc_account_cgroup_owner(wpc->wbc, &folio->page, map_len);
-        return map_len;
-}
-EXPORT_SYMBOL_GPL(iomap_add_to_ioend);
-
 static int iomap_writeback_range(struct iomap_writepage_ctx *wpc,
                 struct folio *folio, u64 pos, u32 rlen, u64 end_pos,
                 bool *wb_pending)
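A note on the shape of iomap_ioend_writeback_submit() above: an errored ioend is not torn down directly; the error is stored in the bio and bio_endio() is called so that the one completion handler does all cleanup, for both the success and the failure path. The following standalone sketch models that pattern with entirely hypothetical names (mini_io, submit, start_hw_io); it is an illustration of the single-completion-path idea, not kernel code.

#include <stdio.h>

/* Hypothetical miniature of an I/O request with one completion hook. */
struct mini_io {
        int status;                     /* 0 on success, -errno on error */
        void (*end_io)(struct mini_io *);
};

static void end_io_handler(struct mini_io *io)
{
        /* The one place that ends writeback and frees the request,
         * whether the device completed it or submission failed early. */
        printf("completed with status %d\n", io->status);
}

static void start_hw_io(struct mini_io *io)
{
        io->status = 0;                 /* pretend the device succeeded */
        io->end_io(io);
}

/* Mirrors the shape of iomap_ioend_writeback_submit(): an error does not
 * cancel the request in place; it is completed through the normal path. */
static int submit(struct mini_io *io, int error)
{
        io->end_io = end_io_handler;
        if (error) {
                io->status = error;
                io->end_io(io);         /* run completion with the error */
                return error;
        }
        start_hw_io(io);
        return 0;
}

int main(void)
{
        struct mini_io a = {0}, b = {0};
        submit(&a, 0);                  /* normal submission */
        submit(&b, -5);                 /* -EIO-style failure, same cleanup */
        return 0;
}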
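The heart of the removed iomap_can_add_to_ioend() is a contiguity-plus-compatibility test: append to the pending ioend only if the new range starts exactly where the ioend ends, the no-merge flags match, no boundary was requested, and the completion batch has not grown too long. Here is a minimal userspace sketch of that decision, with made-up names (pending_ioend, can_append) and stand-in constants for IOEND_BATCH_SIZE and IOMAP_IOEND_NOMERGE_FLAGS; the real code additionally checks sector contiguity of the underlying bio.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_CAP 4096          /* stand-in for IOEND_BATCH_SIZE */
#define NOMERGE_MASK 0x7        /* stand-in for IOMAP_IOEND_NOMERGE_FLAGS */

/* Hypothetical, simplified view of a pending ioend. */
struct pending_ioend {
        uint64_t offset;        /* file offset covered so far */
        uint64_t size;          /* bytes covered so far */
        uint16_t flags;
        unsigned int nr_folios;
};

/* Reject boundary-flagged ranges, flag mismatches, byte discontiguity,
 * and over-long completion batches; otherwise the range may append. */
static bool can_append(const struct pending_ioend *io, uint64_t pos,
                       uint16_t flags, bool boundary)
{
        if (boundary)
                return false;
        if ((flags & NOMERGE_MASK) != (io->flags & NOMERGE_MASK))
                return false;
        if (pos != io->offset + io->size)
                return false;
        if (io->nr_folios >= BATCH_CAP)
                return false;
        return true;
}

int main(void)
{
        struct pending_ioend io = { .offset = 4096, .size = 8192,
                                    .flags = 0, .nr_folios = 2 };

        /* Byte-contiguous at 12288: appendable. */
        printf("%d\n", can_append(&io, 12288, 0, false));  /* prints 1 */
        /* A 4 KiB gap forces a new ioend. */
        printf("%d\n", can_append(&io, 16384, 0, false));  /* prints 0 */
        return 0;
}

Capping the batch at IOEND_BATCH_SIZE trades a little merging opportunity for bounded completion latency, since completion walks every folio in the ioend in one loop.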
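The zero-padding race in the big comment of iomap_add_to_ioend() comes down to one clamp: trim io_size to the in-core EOF at submit time so the on-disk size update at completion is byte-accurate. Plugging concrete numbers into scenario 1 of the comment makes the effect visible; this is a worked-arithmetic sketch under assumed values (A = 0, B = 12, BS = 25, C = 8), not kernel code.

#include <stdint.h>
#include <stdio.h>

static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        /* Scenario 1 from the comment: A+B+C = 20 < A+BS = 25. */
        uint64_t A = 0, B = 12, BS = 25, C = 8;
        uint64_t end_pos = A + B;   /* in-core EOF when the I/O is built */

        /* Unclamped: completion would size the file to
         * min(A+B+C, A+BS) = 20, exposing zeroes in [A+B, A+B+C)
         * after a crash, because [12, 20) never reached disk. */
        printf("unclamped disk size: %llu\n",
               (unsigned long long)min64(A + B + C, A + BS));      /* 20 */

        /* Clamped, mirroring
         *   if (io_offset + io_size > end_pos)
         *           io_size = end_pos - io_offset;
         * with io_offset = A: only bytes actually written before
         * submission count toward the on-disk size. */
        uint64_t io_size = min64(BS, end_pos - A);
        printf("clamped disk size:   %llu\n",
               (unsigned long long)(A + io_size));                 /* 12 */
        return 0;
}

The later write [A+B, A+B+C] then carries its own size update when its writeback completes, which is why, as the comment notes, the clamp defeats chaining the ioends of appending writes.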