xen/common/tmem_xen.c @ 22848:6341fe0f4e5a
Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author  Keir Fraser <keir@xen.org>
date    Tue Jan 25 14:06:55 2011 +0000
parents 051a1b1b8f8a

/******************************************************************************
 * tmem-xen.c
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>
#include <xen/cpu.h>

#define EXPORT /* indicates code other modules are dependent upon */

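/*
 * The tunables below are hypervisor command-line options, registered via
 * boolean_param()/integer_param().  For example (illustrative boot line),
 * "tmem tmem_compress" enables tmem with compression.  All default to
 * off/0; __read_mostly keeps the flags off frequently-written cache lines.
 */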
EXPORT bool_t __read_mostly opt_tmem = 0;
boolean_param("tmem", opt_tmem);

EXPORT bool_t __read_mostly opt_tmem_compress = 0;
boolean_param("tmem_compress", opt_tmem_compress);

EXPORT bool_t __read_mostly opt_tmem_dedup = 0;
boolean_param("tmem_dedup", opt_tmem_dedup);

EXPORT bool_t __read_mostly opt_tmem_tze = 0;
boolean_param("tmem_tze", opt_tmem_tze);

EXPORT bool_t __read_mostly opt_tmem_shared_auth = 0;
boolean_param("tmem_shared_auth", opt_tmem_shared_auth);

EXPORT int __read_mostly opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);

EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0);

#ifdef COMPARE_COPY_PAGE_SSE2
DECL_CYC_COUNTER(pg_copy1);
DECL_CYC_COUNTER(pg_copy2);
DECL_CYC_COUNTER(pg_copy3);
DECL_CYC_COUNTER(pg_copy4);
#else
DECL_CYC_COUNTER(pg_copy);
#endif

/* the per-CPU buffers below avoid a concurrency bottleneck; they are
 * always allocated, but could be allocated only iff opt_tmem_compress */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
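/*
 * Per-CPU scratch space for LZO1X: workmem holds the compressor's working
 * state (LZO1X_1_MEM_COMPRESS bytes); dstmem receives the compressed
 * output.  Two destination pages are needed because LZO output can exceed
 * its input on incompressible data (worst case is roughly
 * len + len/16 + 67 bytes).  Both are allocated by the CPU notifier at
 * the bottom of this file.
 */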
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);

#ifdef COMPARE_COPY_PAGE_SSE2
#include <asm/flushtlb.h> /* REMOVE ME AFTER TEST */
#include <asm/page.h> /* REMOVE ME AFTER TEST */
#endif
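/*
 * Copy one page of tmem data.  With COMPARE_COPY_PAGE_SSE2 defined this
 * doubles as a micro-benchmark that times copy_page_sse2() against
 * memcpy() with both cold and hot caches; otherwise it is a timed memcpy().
 */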
void tmh_copy_page(char *to, char *from)
{
#ifdef COMPARE_COPY_PAGE_SSE2
    DECL_LOCAL_CYC_COUNTER(pg_copy1);
    DECL_LOCAL_CYC_COUNTER(pg_copy2);
    DECL_LOCAL_CYC_COUNTER(pg_copy3);
    DECL_LOCAL_CYC_COUNTER(pg_copy4);
    *to = *from; /* don't measure TLB misses */
    flush_area_local(to, FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from, FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy1);
    copy_page_sse2(to, from); /* cold cache */
    END_CYC_COUNTER(pg_copy1);
    START_CYC_COUNTER(pg_copy2);
    copy_page_sse2(to, from); /* hot cache */
    END_CYC_COUNTER(pg_copy2);
    flush_area_local(to, FLUSH_CACHE|FLUSH_ORDER(0));
    flush_area_local(from, FLUSH_CACHE|FLUSH_ORDER(0));
    START_CYC_COUNTER(pg_copy3);
    memcpy(to, from, PAGE_SIZE); /* cold cache */
    END_CYC_COUNTER(pg_copy3);
    START_CYC_COUNTER(pg_copy4);
    memcpy(to, from, PAGE_SIZE); /* hot cache */
    END_CYC_COUNTER(pg_copy4);
#else
    DECL_LOCAL_CYC_COUNTER(pg_copy);
    START_CYC_COUNTER(pg_copy);
    memcpy(to, from, PAGE_SIZE);
    END_CYC_COUNTER(pg_copy);
#endif
}

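/*
 * Map a client (guest) frame into Xen's address space.  cli_get_page()
 * translates the client mfn, takes a page reference (and, iff cli_write,
 * a PGT_writable_page type count) and returns a mapping; cli_put_page()
 * undoes all of that and, when mark_dirty is set, records the write in
 * the log-dirty bitmap for live migration.  The ia64 variants are
 * unimplemented stubs.
 */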
#ifdef __ia64__
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
                                 pfp_t **pcli_pfp, bool_t cli_write)
{
    ASSERT(0);
    return NULL;
}

static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
                                unsigned long cli_mfn, bool_t mark_dirty)
{
    ASSERT(0);
}
#else
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
                                 pfp_t **pcli_pfp, bool_t cli_write)
{
    unsigned long cli_mfn;
    p2m_type_t t;
    struct page_info *page;
    int ret;

    cli_mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(current->domain), cmfn, &t));
    if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
        return NULL;
    page = mfn_to_page(cli_mfn);
    if ( cli_write )
        ret = get_page_and_type(page, current->domain, PGT_writable_page);
    else
        ret = get_page(page, current->domain);
    if ( !ret )
        return NULL;
    *pcli_mfn = cli_mfn;
    *pcli_pfp = (pfp_t *)page;
    return map_domain_page(cli_mfn);
}

static inline void cli_put_page(void *cli_va, pfp_t *cli_pfp,
                                unsigned long cli_mfn, bool_t mark_dirty)
{
    if ( mark_dirty )
    {
        put_page_and_type((struct page_info *)cli_pfp);
        paging_mark_dirty(current->domain, cli_mfn);
    }
    else
        put_page((struct page_info *)cli_pfp);
    unmap_domain_page(cli_va);
}
#endif

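/*
 * Copy (part of) a client page into a tmem page.  A zero
 * tmem_offset/pfn_offset/len triple requests a zero-filled tmem page.
 * A non-NULL cli_va marks a control-operation caller whose buffer is
 * already accessible; otherwise the client frame is looked up and
 * pinned via cli_get_page().
 */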
EXPORT int tmh_copy_from_client(pfp_t *pfp,
    tmem_cli_mfn_t cmfn, pagesize_t tmem_offset,
    pagesize_t pfn_offset, pagesize_t len, void *cli_va)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    void *tmem_va;
    pfp_t *cli_pfp = NULL;
    bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */

    ASSERT(pfp != NULL);
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    if ( tmem_offset == 0 && pfn_offset == 0 && len == 0 )
    {
        memset(tmem_va, 0, PAGE_SIZE);
        unmap_domain_page(tmem_va);
        return 1;
    }
    if ( !tmemc )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
        if ( cli_va == NULL )
        {
            unmap_domain_page(tmem_va);
            return -EFAULT;
        }
    }
    mb();
    if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset )
        tmh_copy_page(tmem_va, cli_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) &&
              (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)tmem_va + tmem_offset, (char *)cli_va + pfn_offset, len);
    if ( !tmemc )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
    unmap_domain_page(tmem_va);
    return 1;
}

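/*
 * Compress one client page into this CPU's dstmem buffer.  The result
 * returned through *out_va/*out_len lives in per-CPU scratch space, so
 * the caller is expected to copy it out before compressing again on
 * this CPU.
 */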
EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn,
    void **out_va, size_t *out_len, void *cli_va)
{
    int ret = 0;
    unsigned char *dmem = this_cpu(dstmem);
    unsigned char *wmem = this_cpu(workmem);
    pfp_t *cli_pfp = NULL;
    unsigned long cli_mfn = 0;
    bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */

    if ( dmem == NULL || wmem == NULL )
        return 0; /* no buffer, so can't compress */
    if ( !tmemc )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    mb();
    ret = lzo1x_1_compress(cli_va, PAGE_SIZE, dmem, out_len, wmem);
    ASSERT(ret == LZO_E_OK);
    *out_va = dmem;
    if ( !tmemc )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
    return 1;
}

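/*
 * Copy (part of) a tmem page out to a client page.  The client frame is
 * pinned writable and marked dirty when released, so live migration sees
 * the update.
 */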
EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp,
    pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, void *cli_va)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    void *tmem_va;
    pfp_t *cli_pfp = NULL;
    bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */

    ASSERT(pfp != NULL);
    if ( !tmemc )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(tmem_mfn);
    if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset )
        tmh_copy_page(cli_va, tmem_va);
    else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) )
        memcpy((char *)cli_va + pfn_offset, (char *)tmem_va + tmem_offset, len);
    unmap_domain_page(tmem_va);
    if ( !tmemc )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
    mb();
    return 1;
}

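/*
 * Decompress a page previously squeezed by tmh_compress_from_client()
 * directly into the client's frame; a stored page must decompress to
 * exactly PAGE_SIZE.
 */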
EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
    size_t size, void *cli_va)
{
    unsigned long cli_mfn = 0;
    pfp_t *cli_pfp = NULL;
    size_t out_len = PAGE_SIZE;
    bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */
    int ret;

    if ( !tmemc )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    ret = lzo1x_decompress_safe(tmem_va, size, cli_va, &out_len);
    ASSERT(ret == LZO_E_OK);
    ASSERT(out_len == PAGE_SIZE);
    if ( !tmemc )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
    mb();
    return 1;
}

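/*
 * "tze" is trailing-zero elimination: only the first len bytes of the
 * page were stored (the rest of the page was all zeroes), so copy those
 * back and zero-fill the remainder.
 */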
EXPORT int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va,
    pagesize_t len)
{
    void *cli_va;
    unsigned long cli_mfn;
    pfp_t *cli_pfp = NULL;

    ASSERT(!(len & (sizeof(uint64_t)-1)));
    ASSERT(len <= PAGE_SIZE);
    ASSERT(len > 0 || tmem_va == NULL);
    cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
    if ( cli_va == NULL )
        return -EFAULT;
    if ( len > 0 )
        memcpy((char *)cli_va, (char *)tmem_va, len);
    if ( len < PAGE_SIZE )
        memset((char *)cli_va + len, 0, PAGE_SIZE - len);
    cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
    mb();
    return 1;
}

/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/

EXPORT struct xmem_pool *tmh_mempool = NULL;
EXPORT unsigned int tmh_mempool_maxalloc = 0;

EXPORT DEFINE_SPINLOCK(tmh_page_list_lock);
EXPORT PAGE_LIST_HEAD(tmh_page_list);
EXPORT unsigned long tmh_page_list_pages = 0;

/* free anything on tmh_page_list to Xen's scrub list */
EXPORT void tmh_release_avail_pages_to_host(void)
{
    spin_lock(&tmh_page_list_lock);
    while ( !page_list_empty(&tmh_page_list) )
    {
        struct page_info *pg = page_list_remove_head(&tmh_page_list);
        scrub_one_page(pg);
        tmh_page_list_pages--;
        free_domheap_page(pg);
    }
    ASSERT(tmh_page_list_pages == 0);
    INIT_PAGE_LIST_HEAD(&tmh_page_list);
    spin_unlock(&tmh_page_list_lock);
}

EXPORT void tmh_scrub_page(struct page_info *pi, unsigned int memflags)
{
    if ( pi == NULL )
        return;
    if ( !(memflags & MEMF_tmem) )
        scrub_one_page(pi);
}

#ifndef __i386__
static noinline void *tmh_mempool_page_get(unsigned long size)
{
    struct page_info *pi;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = tmh_alloc_page(NULL, 0)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return page_to_virt(pi);
}

static void tmh_mempool_page_put(void *page_va)
{
    ASSERT(IS_PAGE_ALIGNED(page_va));
    tmh_free_page(virt_to_page(page_va));
}

static int __init tmh_mempool_init(void)
{
    tmh_mempool = xmem_pool_create("tmem", tmh_mempool_page_get,
        tmh_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh_mempool )
        tmh_mempool_maxalloc = xmem_pool_maxalloc(tmh_mempool);
    return tmh_mempool != NULL;
}

/* persistent pools are per-domain */

static void *tmh_persistent_pool_page_get(unsigned long size)
{
    struct page_info *pi;
    struct domain *d = current->domain;

    ASSERT(size == PAGE_SIZE);
    if ( (pi = _tmh_alloc_page_thispool(d)) == NULL )
        return NULL;
    ASSERT(IS_VALID_PAGE(pi));
    return __map_domain_page(pi);
}

static void tmh_persistent_pool_page_put(void *page_va)
{
    struct page_info *pi;

    ASSERT(IS_PAGE_ALIGNED(page_va));
    pi = virt_to_page(page_va);
    ASSERT(IS_VALID_PAGE(pi));
    _tmh_free_page_thispool(pi);
}
#endif

/****************** XEN-SPECIFIC CLIENT HANDLING ********************/

EXPORT tmh_client_t *tmh_client_init(cli_id_t cli_id)
{
    tmh_client_t *tmh;
    char name[5];
    int i, shift;

    if ( (tmh = xmalloc(tmh_client_t)) == NULL )
        return NULL;
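    /* encode cli_id as four "hex-ish" digits (nibble + '0', so values
     * above 9 land past '9' in ASCII) for the pool's debug name */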
    for ( i = 0, shift = 12; i < 4; shift -= 4, i++ )
        name[i] = (((unsigned short)cli_id >> shift) & 0xf) + '0';
    name[4] = '\0';
#ifndef __i386__
    tmh->persistent_pool = xmem_pool_create(name, tmh_persistent_pool_page_get,
        tmh_persistent_pool_page_put, PAGE_SIZE, 0, PAGE_SIZE);
    if ( tmh->persistent_pool == NULL )
    {
        xfree(tmh);
        return NULL;
    }
#endif
    return tmh;
}

EXPORT void tmh_client_destroy(tmh_client_t *tmh)
{
    ASSERT(tmh->domain->is_dying);
#ifndef __i386__
    xmem_pool_destroy(tmh->persistent_pool);
#endif
    tmh->domain = NULL;
}

/****************** XEN-SPECIFIC HOST INITIALIZATION ********************/

#ifndef __i386__

static int dstmem_order, workmem_order;

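/*
 * The per-CPU compression buffers are (de)allocated from a CPU hotplug
 * notifier, so CPUs brought online after boot get buffers too and
 * offlined CPUs return theirs.
 */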
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE: {
        if ( per_cpu(dstmem, cpu) == NULL )
        {
            struct page_info *p = alloc_domheap_pages(0, dstmem_order, 0);
            per_cpu(dstmem, cpu) = p ? page_to_virt(p) : NULL;
        }
        if ( per_cpu(workmem, cpu) == NULL )
        {
            struct page_info *p = alloc_domheap_pages(0, workmem_order, 0);
            per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL;
        }
        break;
    }
    case CPU_DEAD:
    case CPU_UP_CANCELED: {
        if ( per_cpu(dstmem, cpu) != NULL )
        {
            struct page_info *p = virt_to_page(per_cpu(dstmem, cpu));
            free_domheap_pages(p, dstmem_order);
            per_cpu(dstmem, cpu) = NULL;
        }
        if ( per_cpu(workmem, cpu) != NULL )
        {
            struct page_info *p = virt_to_page(per_cpu(workmem, cpu));
            free_domheap_pages(p, workmem_order);
            per_cpu(workmem, cpu) = NULL;
        }
        break;
    }
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

EXPORT int __init tmh_init(void)
{
    unsigned int cpu;

    if ( !tmh_mempool_init() )
        return 0;

    dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
    workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);

    for_each_online_cpu ( cpu )
    {
        void *hcpu = (void *)(long)cpu;
        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    }

    register_cpu_notifier(&cpu_nfb);

    return 1;
}

#else

EXPORT int __init tmh_init(void)
{
    return 1;
}

#endif