/*	$OpenBSD: vfs_cluster.c,v 1.37 2007/05/26 20:26:51 pedro Exp $	*/
/*	$NetBSD: vfs_cluster.c,v 1.12 1996/04/22 01:39:05 christos Exp $	*/

/*
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.8 (Berkeley) 7/28/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>

#include <uvm/uvm_extern.h>

void	cluster_wbuild(struct vnode *, struct buf *, long, daddr64_t, int,
	    daddr64_t);
struct cluster_save *cluster_collectbufs(struct vnode *, struct cluster_info *,
	    struct buf *);

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, struct cluster_info *ci, u_quad_t filesize)
{
	struct vnode *vp;
	daddr64_t lbn;
	int maxclen, cursize;

	vp = bp->b_vp;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		ci->ci_lasta = ci->ci_clen = ci->ci_cstart = ci->ci_lastw = 0;

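	/*
	 * The write is not sequential if no cluster is in progress
	 * (ci_clen == 0), if there is a gap in the logical block
	 * numbers, or if the new block is not physically contiguous
	 * with the one written before it.
	 */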
	if (ci->ci_clen == 0 || lbn != ci->ci_lastw + 1 ||
	    (bp->b_blkno != ci->ci_lasta + btodb(bp->b_bcount))) {
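		/*
		 * Upper bound on the number of further blocks that can
		 * join the current block in a single MAXBSIZE transfer.
		 */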
		maxclen = MAXBSIZE / vp->v_mount->mnt_stat.f_iosize - 1;
		if (ci->ci_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 */
			cursize = ci->ci_lastw - ci->ci_cstart + 1;
			if (((u_quad_t)(lbn + 1)) * bp->b_bcount != filesize ||
			    lbn != ci->ci_lastw + 1 || ci->ci_clen <= cursize) {
				cluster_wbuild(vp, NULL, bp->b_bcount,
				    ci->ci_cstart, cursize, lbn);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, ci, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_VCLUSTER);
					cluster_wbuild(vp, NULL, bp->b_bcount,
					    ci->ci_cstart, cursize, lbn);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_VCLUSTER);
					ci->ci_lastw = lbn;
					ci->ci_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.
		 * If at end of file, make cluster as large as possible,
		 * otherwise find size of existing cluster.
		 */
		if ((u_quad_t)(lbn + 1) * (u_quad_t)bp->b_bcount != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		    bp->b_blkno == -1)) {
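			/*
			 * Not at end of file, and the block is either
			 * unallocated or VOP_BMAP() failed: write it out
			 * now and reset the cluster state.
			 */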
			bawrite(bp);
			ci->ci_clen = 0;
			ci->ci_lasta = bp->b_blkno;
			ci->ci_cstart = lbn + 1;
			ci->ci_lastw = lbn;
			return;
		}
		ci->ci_clen = maxclen;
		if (maxclen == 0) {		/* I/O not contiguous */
			ci->ci_cstart = lbn + 1;
			bawrite(bp);
		} else {			/* Wait for rest of cluster */
			ci->ci_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == ci->ci_cstart + ci->ci_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, ci->ci_cstart,
		    ci->ci_clen + 1, lbn);
		ci->ci_clen = 0;
		ci->ci_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the
		 * I/O for now.
		 */
		bdwrite(bp);
	ci->ci_lastw = lbn;
	ci->ci_lasta = bp->b_blkno;
}

/*
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of the
 * range being pushed (if last_bp == NULL).
 */
void
cluster_wbuild(struct vnode *vp, struct buf *last_bp, long size,
    daddr64_t start_lbn, int len, daddr64_t lbn)
{
	struct buf *bp;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != filesize %ld",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
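	/*
	 * Skip over blocks that are not resident in the buffer cache,
	 * as well as the block on which I/O is currently being
	 * performed (lbn); neither belongs in the cluster being pushed.
	 */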
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Fewer than two blocks remain, so no cluster can be built. */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			/*
			 * The buffer could have already been flushed out of
			 * the cache.  If that has happened, we'll get a new
			 * buffer here with random data, just drop it.
			 */
			if ((bp->b_flags & B_DELWRI) == 0)
				brelse(bp);
			else
				bawrite(bp);
		}
		return;
	}

	bp = getblk(vp, start_lbn, size, 0, 0);
	if (!(bp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(bp);
		goto redo;
	}

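	/*
	 * The block is dirty; push it asynchronously and continue
	 * scanning the remainder of the range.  Buffers are written
	 * out one at a time here rather than coalesced into a single
	 * large transfer.
	 */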
	++start_lbn;
	--len;
	bawrite(bp);
	goto redo;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in by the caller (last_bp).
 */
struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct cluster_info *ci,
    struct buf *last_bp)
{
	struct cluster_save *buflist;
	daddr64_t lbn;
	int i, len;

	len = ci->ci_lastw - ci->ci_cstart + 1;
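	/*
	 * Allocate the cluster_save header and the bs_children pointer
	 * array as a single chunk; the array lives immediately after
	 * the header.
	 */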
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_VCLUSTER, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
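	/*
	 * Read each block of the cluster into the child list; the
	 * (void) cast deliberately discards bread()'s return value.
	 */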
	for (lbn = ci->ci_cstart, i = 0; i < len; lbn++, i++)
		(void)bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}