* The actual compression is based on zlib, see the other files.
*/
+/* Linear Addressing code
+ *
+ * Copyright (C) 2000 Shane Nay.
+ *
+ * Allows you to have a linearly addressed cramfs filesystem.
+ * Saves the need for buffer, and the munging of the buffer.
+ * Saves a bit over 32k with the default PAGE_SIZE, BUFFER_SIZE,
+ * etc.  Useful on embedded platforms with ROM :-).
+ *
+ * Downside: currently, linearly addressed cramfs partitions
+ * don't co-exist with block cramfs partitions.
+ *
+ */
+
+/*
+ * 28-Dec-2000: XIP mode for linear cramfs
+ * Copyright (C) 2000 Robert Leslie <rob@mars.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
+#include <asm/tlbflush.h>
+/*
+ * NOTE(review): this patch drops 'const' from these two ops tables (and
+ * from their definitions further down).  Nothing in the visible
+ * linear/XIP code writes to either table at runtime -- confirm the
+ * un-consting is actually required before merging.
+ */
-static const struct super_operations cramfs_ops;
-static const struct inode_operations cramfs_dir_inode_operations;
+static struct super_operations cramfs_ops;
+static struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct address_space_operations cramfs_aops;
return 0;
}
+#ifdef CONFIG_CRAMFS_LINEAR_XIP
+/*
+ * mmap() for XIP regular files: map the file's data straight out of the
+ * linear cramfs image (ROM) instead of going through the page cache.
+ *
+ * Writable (private) mappings fall back to generic_file_mmap() so that
+ * copy-on-write still works; shared writable mappings are refused since
+ * the backing medium is read-only.
+ */
+static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long address, length, offset;
+	struct inode *inode = file->f_dentry->d_inode;
+	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
+
+	/* this is only used in the case of read-only maps for XIP */
+
+	if (vma->vm_flags & VM_WRITE)
+		return generic_file_mmap(file, vma);
+
+	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+		return -EINVAL;
+
+	/* Refuse a mapping that starts beyond the end of the file data. */
+	offset = vma->vm_pgoff << PAGE_SHIFT;
+	if (offset >= PAGE_ALIGN(inode->i_size))
+		return -EINVAL;
+
+	/*
+	 * XIP file data is stored page-aligned after the inode (see the
+	 * XIP branch of cramfs_readpage()); linear_phys_addr itself is
+	 * verified page-aligned at mount time.
+	 */
+	address = PAGE_ALIGN(sbi->linear_phys_addr + OFFSET(inode)) + offset;
+
+	/*
+	 * Clamp the mapping so that, together with vm_pgoff, it never
+	 * runs past the (page-rounded) file data.  The old code ignored
+	 * vm_pgoff here and could map past EOF.
+	 */
+	length = vma->vm_end - vma->vm_start;
+	if (length > PAGE_ALIGN(inode->i_size) - offset)
+		length = PAGE_ALIGN(inode->i_size) - offset;
+
+	/*
+	 * Don't dump addresses that are not real memory to a core file.
+	 */
+	vma->vm_flags |= (VM_IO | VM_XIP);
+	/*
+	 * NOTE(review): the previous flush_tlb_page(vma, address) call was
+	 * dropped: 'address' is a physical address, not a user virtual
+	 * address inside this vma, so the flush was meaningless -- and no
+	 * stale translation can exist for a range only now being mapped.
+	 */
+	if (remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, length,
+			    vma->vm_page_prot))
+		return -EAGAIN;
+
+#ifdef DEBUG_CRAMFS_XIP
+	printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
+		", page_prot 0x%08lx\n",
+		file->f_dentry->d_name.name, address, length,
+		vma->vm_start, pgprot_val(vma->vm_page_prot));
+#endif
+
+	return 0;
+}
+
+/*
+ * File operations for XIP regular files: reads go through the normal
+ * generic paths, mmap() maps the ROM directly via cramfs_mmap().
+ * Use C99 designated initializers (matching the other ops tables in
+ * this file) instead of the obsolete GNU "field:" form.
+ */
+static struct file_operations cramfs_linear_xip_fops = {
+	.aio_read	= generic_file_aio_read,
+	.read		= do_sync_read,
+	.mmap		= cramfs_mmap,
+};
+
+/* The sticky bit on a cramfs inode is (re)used to mark a file as XIP:
+ * its data is stored uncompressed and page-aligned in the image. */
+ #define CRAMFS_INODE_IS_XIP(x) ((x)->i_mode & S_ISVTX)
+
+ #endif
+
+ #ifdef CONFIG_CRAMFS_LINEAR
+/* No readahead for a linear image: pages come straight from ROM. */
+ static struct backing_dev_info cramfs_backing_dev_info = {
+ .ra_pages = 0, /* No readahead */
+ };
+ #endif
+
static struct inode *get_cramfs_inode(struct super_block *sb,
struct cramfs_inode * cramfs_inode)
{
cramfs_iget5_test, cramfs_iget5_set,
cramfs_inode);
if (inode && (inode->i_state & I_NEW)) {
+#ifdef CONFIG_CRAMFS_LINEAR
+ inode->i_mapping->backing_dev_info = &cramfs_backing_dev_info;
+#endif
+
+#ifdef CONFIG_CRAMFS_LINEAR_XIP
+ if(S_ISREG(inode->i_mode))
+ inode->i_fop = CRAMFS_INODE_IS_XIP(inode) ? &cramfs_linear_xip_fops : &generic_ro_fops;
+#endif
unlock_new_inode(inode);
}
return inode;
}
+#ifdef CONFIG_CRAMFS_LINEAR
+/*
+ * Return a pointer to the block in the linearly addressed cramfs image.
+ *
+ * Deliberately no bounds check against sbi->size: during early mount
+ * only the first page of the image is ioremap()ed and sbi->size is not
+ * yet known, so callers are trusted to stay inside the image.  Callers
+ * serialize on read_mutex, mirroring the block-mode cramfs_read().
+ */
+static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
+{
+ struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
+
+ if (!len)
+ return NULL;
+ return (void*)(sbi->linear_virt_addr + offset);
+}
+#else /* Not linear addressing - aka regular block mode. */
/*
* We have our own block cache: don't fill up the buffer cache
* with the rom-image, because the way the filesystem is set
}
return read_buffers[buffer] + offset;
}
+#endif /* CONFIG_CRAMFS_LINEAR */
static void cramfs_put_super(struct super_block *sb)
{
static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
{
+#ifndef CONFIG_CRAMFS_LINEAR
int i;
+#else
+ char *p;
+#endif
struct cramfs_super super;
unsigned long root_offset;
struct cramfs_sb_info *sbi;
return -ENOMEM;
sb->s_fs_info = sbi;
+#ifndef CONFIG_CRAMFS_LINEAR
/* Invalidate the read buffers on mount: think disk change.. */
mutex_lock(&read_mutex);
for (i = 0; i < READ_BUFFERS; i++)
buffer_blocknr[i] = -1;
+#else /* CONFIG_CRAMFS_LINEAR */
+ /*
+ * The physical location of the cramfs image is specified as
+ * a mount parameter. This parameter is mandatory for obvious
+ * reasons. Some validation is made on the phys address but this
+ * is not exhaustive and we count on the fact that someone using
+ * this feature is supposed to know what he/she's doing.
+ */
+ if (!data || !(p = strstr((char *)data, "physaddr="))) {
+ printk(KERN_ERR "cramfs: unknown physical address for linear cramfs image\n");
+ goto out;
+ }
+ sbi->linear_phys_addr = simple_strtoul(p + 9, NULL, 0);
+ if (sbi->linear_phys_addr & (PAGE_SIZE-1)) {
+ printk(KERN_ERR "cramfs: physical address 0x%lx for linear cramfs isn't aligned to a page boundary\n",
+ sbi->linear_phys_addr);
+ goto out;
+ }
+ if (sbi->linear_phys_addr == 0) {
+ printk(KERN_ERR "cramfs: physical address for linear cramfs image can't be 0\n");
+ goto out;
+ }
+ printk(KERN_INFO "cramfs: checking physical address 0x%lx for linear cramfs image\n",
+ sbi->linear_phys_addr);
+
+ /* Map only one page for now. Will remap it when fs size is known. */
+ sbi->linear_virt_addr =
+ ioremap(sbi->linear_phys_addr, PAGE_SIZE);
+ if (!sbi->linear_virt_addr) {
+ printk(KERN_ERR "cramfs: ioremap of the linear cramfs image failed\n");
+ goto out;
+ }
+
+ mutex_lock(&read_mutex);
+#endif /* CONFIG_CRAMFS_LINEAR */
+
/* Read the first block and get the superblock from it */
memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
mutex_unlock(&read_mutex);
iput(root);
goto out;
}
+
+#ifdef CONFIG_CRAMFS_LINEAR
+ /* Remap the whole filesystem now */
+ iounmap(sbi->linear_virt_addr);
+ printk(KERN_INFO "cramfs: linear cramfs image appears to be %lu KB in size\n",
+ sbi->size/1024);
+
+ sbi->linear_virt_addr =
+ ioremap_cached(sbi->linear_phys_addr, sbi->size);
+
+ if (!sbi->linear_virt_addr) {
+ printk(KERN_ERR "cramfs: ioremap of the linear cramfs image failed\n");
+ goto out;
+ }
+#endif /* CONFIG_CRAMFS_LINEAR */
return 0;
out:
+#ifdef CONFIG_CRAMFS_LINEAR
+ if (sbi->linear_virt_addr)
+ iounmap(sbi->linear_virt_addr);
+#endif /* CONFIG_CRAMFS_LINEAR */
kfree(sbi);
sb->s_fs_info = NULL;
return -EINVAL;
u32 blkptr_offset = OFFSET(inode) + page->index*4;
u32 start_offset, compr_len;
+#ifdef CONFIG_CRAMFS_LINEAR_XIP
+ if(CRAMFS_INODE_IS_XIP(inode)) {
+ blkptr_offset =
+ PAGE_ALIGN(OFFSET(inode)) +
+ page->index * PAGE_CACHE_SIZE;
+ mutex_lock(&read_mutex);
+ memcpy(page_address(page),
+ cramfs_read(sb, blkptr_offset, PAGE_CACHE_SIZE),
+ PAGE_CACHE_SIZE);
+ mutex_unlock(&read_mutex);
+ bytes_filled = PAGE_CACHE_SIZE;
+ pgdata = kmap(page);
+ } else {
+#endif /* CONFIG_CRAMFS_LINEAR_XIP */
start_offset = OFFSET(inode) + maxblock*4;
mutex_lock(&read_mutex);
if (page->index)
compr_len);
mutex_unlock(&read_mutex);
}
+#ifdef CONFIG_CRAMFS_LINEAR_XIP
+ }
+#endif /* CONFIG_CRAMFS_LINEAR_XIP */
} else
pgdata = kmap(page);
memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
.readdir = cramfs_readdir,
};
+/* NOTE(review): 'const' dropped only to match the un-consted forward
+ * declaration above; nothing visibly writes to this table -- confirm. */
-static const struct inode_operations cramfs_dir_inode_operations = {
+static struct inode_operations cramfs_dir_inode_operations = {
.lookup = cramfs_lookup,
};
+/* NOTE(review): un-consted to match the forward declaration above --
+ * confirm a runtime write actually requires this. */
-static const struct super_operations cramfs_ops = {
+static struct super_operations cramfs_ops = {
.put_super = cramfs_put_super,
.remount_fs = cramfs_remount,
.statfs = cramfs_statfs,
static int cramfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
-	return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super,
-			   mnt);
+#ifdef CONFIG_CRAMFS_LINEAR
+	/*
+	 * A linear image lives at a physical address given via mount
+	 * data ("physaddr=") and has no backing block device, so mount
+	 * it anonymously; dev_name is ignored.
+	 */
+	return get_sb_nodev(fs_type, flags, data, cramfs_fill_super, mnt);
+#else
+	return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super, mnt);
+#endif
}
static struct file_system_type cramfs_fs_type = {
.name = "cramfs",
.get_sb = cramfs_get_sb,
+#ifdef CONFIG_CRAMFS_LINEAR
+	/*
+	 * Linear images are mounted with get_sb_nodev(); tearing one down
+	 * with kill_block_super() would dereference the NULL sb->s_bdev of
+	 * a nodev mount.  Use the anonymous-super killer instead.
+	 */
+	.kill_sb	= kill_anon_super,
+#else
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+#endif /* CONFIG_CRAMFS_LINEAR */
};
static int __init init_cramfs_fs(void)
continue;
}
- if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ if (!vma || ((vma->vm_flags & (VM_IO | VM_PFNMAP))
+ && !(vma->vm_flags & VM_XIP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
return pte;
}
+/*
+ * Install a writable, dirty pte mapping new_page at 'address', replacing
+ * the read-only entry that faulted.  Used by the XIP copy-on-write path
+ * in do_wp_page() once the ROM contents have been copied into new_page.
+ *
+ * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
+ */
+static inline void break_cow(struct vm_area_struct * vma, struct page * new_page, unsigned long address,
+ pte_t *page_table)
+{
+ pte_t entry;
+
+ entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)),
+ vma);
+ ptep_establish(vma, address, page_table, entry);
+ update_mmu_cache(vma, address, entry);
+ lazy_mmu_prot_update(entry);
+}
+
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
/*
spinlock_t *ptl, pte_t orig_pte)
{
struct page *old_page, *new_page;
+ unsigned long pfn = pte_pfn(orig_pte);
pte_t entry;
int reuse = 0, ret = VM_FAULT_MINOR;
struct page *dirty_page = NULL;
+ if (unlikely(!pfn_valid(pfn))) {
+ if ((vma->vm_flags & VM_XIP) && pte_present(orig_pte) &&
+ pte_read(orig_pte)) {
+ /*
+ * Handle COW of XIP memory.
+ * Note that the source memory actually isn't a ram
+ * page so no struct page is associated to the source
+ * pte.
+ */
+ char *dst;
+ int ret;
+
+ spin_unlock(&mm->page_table_lock);
+ new_page = alloc_page(GFP_HIGHUSER);
+ if (!new_page)
+ return VM_FAULT_OOM;
+
+ /* copy XIP data to memory */
+
+ dst = kmap_atomic(new_page, KM_USER0);
+ ret = copy_from_user(dst, (void*)address, PAGE_SIZE);
+ kunmap_atomic(dst, KM_USER0);
+
+ /* make sure pte didn't change while we dropped the
+ lock */
+ spin_lock(&mm->page_table_lock);
+ if (!ret && pte_same(*page_table, orig_pte)) {
+ inc_mm_counter(mm, file_rss);
+ break_cow(vma, new_page, address, page_table);
+ lru_cache_add(new_page);
+ page_add_file_rmap(new_page);
+ spin_unlock(&mm->page_table_lock);
+ return VM_FAULT_MINOR; /* Minor fault */
+ }
+
+ /* pte changed: back off */
+ spin_unlock(&mm->page_table_lock);
+ page_cache_release(new_page);
+ return ret ? VM_FAULT_OOM : VM_FAULT_MINOR;
+ }
+ }
+
old_page = vm_normal_page(vma, address, orig_pte);
if (!old_page)
goto gotten;