Changeset 29484 in osm for applications/editors/josm/plugins/imagerycache/src
- Timestamp:
- 2013-04-07T17:07:27+02:00
- Location:
- applications/editors/josm/plugins/imagerycache/src/org
- Files:
- 6 added
- 4 deleted
- 19 edited
applications/editors/josm/plugins/imagerycache/src/org/mapdb/AsyncWriteEngine.java
r29363 r29484 43 43 44 44 protected static final Object DELETED = new Object(); 45 protected final Locks.RecidLocks writeLocks = new Locks.LongHashMapRecidLocks();45 protected final ReentrantLock[] writeLocks = Utils.newLocks(32); 46 46 47 47 protected final ReentrantReadWriteLock commitLock; … … 77 77 if(!iter.moveToNext()){ 78 78 //empty map, pause for a moment to give it chance to fill 79 if( closeInProgress ||(parentEngineWeakRef!=null && parentEngineWeakRef.get()==null) || writerFailedException!=null) return;79 if( (parentEngineWeakRef!=null && parentEngineWeakRef.get()==null) || writerFailedException!=null) return; 80 80 Thread.sleep(asyncFlushDelay); 81 81 if(closeInProgress){ 82 //lock world and write everything 83 Utils.lockAll(writeLocks); 84 try{ 85 while(!items.isEmpty()){ 86 iter = items.longMapIterator(); 87 while(iter.moveToNext()){ 88 long recid = iter.key(); 89 Fun.Tuple2<Object,Serializer> value = iter.value(); 90 if(value.a==DELETED){ 91 AsyncWriteEngine.super.delete(recid, value.b); 92 }else{ 93 AsyncWriteEngine.super.update(recid, value.a, value.b); 94 } 95 items.remove(recid, value); 96 } 97 } 98 return; 99 }finally{ 100 Utils.unlockAll(writeLocks); 101 } 102 } 82 103 }else do{ 83 104 //iterate over items and write them 84 105 long recid = iter.key(); 85 106 86 writeLocks.lock(recid);107 Utils.lock(writeLocks,recid); 87 108 try{ 88 109 Fun.Tuple2<Object,Serializer> value = iter.value(); … … 94 115 items.remove(recid, value); 95 116 }finally { 96 writeLocks.unlock(recid);117 Utils.unlock(writeLocks, recid); 97 118 } 98 119 }while(iter.moveToNext()); … … 124 145 @Override 125 146 public <A> long put(A value, Serializer<A> serializer) { 126 checkState();127 128 129 147 if(commitLock!=null) commitLock.readLock().lock(); 130 148 try{ 131 132 149 try { 133 Long recid = newRecids.take(); 150 Long recid = newRecids.take(); //TODO possible deadlock while closing 134 151 update(recid, value, serializer); 135 152 return recid; … … 150 167 @Override 151 168 public <A> A get(long recid, Serializer<A> serializer) { 152 checkState();153 169 if(commitLock!=null) commitLock.readLock().lock(); 154 170 try{ 155 writeLocks.lock(recid);171 Utils.lock(writeLocks,recid); 156 172 try{ 173 checkState(); 157 174 Fun.Tuple2<Object,Serializer> item = items.get(recid); 158 175 if(item!=null){ … … 163 180 return super.get(recid, serializer); 164 181 }finally{ 165 writeLocks.unlock(recid);182 Utils.unlock(writeLocks,recid); 166 183 } 167 184 }finally{ … … 172 189 @Override 173 190 public <A> void update(long recid, A value, Serializer<A> serializer) { 174 checkState(); 175 if(commitLock!=null ) commitLock.readLock().lock();176 try{ 177 178 writeLocks.lock(recid);191 192 if(commitLock!=null && serializer!=SerializerPojo.serializer) commitLock.readLock().lock(); 193 try{ 194 195 Utils.lock(writeLocks, recid); 179 196 try{ 197 checkState(); 180 198 items.put(recid, new Fun.Tuple2(value,serializer)); 181 199 }finally{ 182 writeLocks.unlock(recid);200 Utils.unlock(writeLocks, recid); 183 201 } 184 202 }finally{ 185 if(commitLock!=null ) commitLock.readLock().unlock();203 if(commitLock!=null&& serializer!=SerializerPojo.serializer) commitLock.readLock().unlock(); 186 204 } 187 205 … … 190 208 @Override 191 209 public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) { 192 checkState(); 193 writeLocks.lock(recid); 194 try{ 210 //TODO commit lock? 
211 Utils.lock(writeLocks, recid); 212 try{ 213 checkState(); 195 214 Fun.Tuple2<Object, Serializer> existing = items.get(recid); 196 215 A oldValue = existing!=null? (A) existing.a : super.get(recid, serializer); … … 202 221 } 203 222 }finally{ 204 writeLocks.unlock(recid);223 Utils.unlock(writeLocks, recid); 205 224 206 225 } … … 277 296 try{ 278 297 while(!items.isEmpty()) LockSupport.parkNanos(100); 279 280 super. commit();298 newRecids.clear(); 299 super.rollback(); 281 300 }finally { 282 301 commitLock.writeLock().unlock(); -
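The AsyncWriteEngine changes above swap the old per-recid Locks.RecidLocks for a fixed array of 32 ReentrantLocks addressed through Utils.lock/Utils.unlock, and the writer thread now handles closeInProgress by taking every stripe and draining the pending-write map before returning. A minimal standalone sketch of that drain-on-close pattern (class and field names here are simplified stand-ins, not MapDB's actual types):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

// Simplified sketch; `pending` stands in for the `items` map in the diff,
// and writeToStore() for the super.update()/super.delete() calls.
class AsyncWriterSketch {
    final ReentrantLock[] writeLocks = new ReentrantLock[32];
    final ConcurrentHashMap<Long, Object> pending = new ConcurrentHashMap<>();
    volatile boolean closeInProgress;

    AsyncWriterSketch() {
        for (int i = 0; i < writeLocks.length; i++) writeLocks[i] = new ReentrantLock();
    }

    void drainOnClose() {
        // "lock world and write everything": holding every stripe means no
        // concurrent update/get can race the final flush
        for (ReentrantLock l : writeLocks) l.lock();
        try {
            while (!pending.isEmpty()) {
                for (Long recid : pending.keySet()) {
                    writeToStore(recid, pending.remove(recid));
                }
            }
        } finally {
            for (ReentrantLock l : writeLocks) l.unlock();
        }
    }

    void writeToStore(long recid, Object value) { /* delegate to wrapped engine */ }
}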
applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeKeySerializer.java
r29363 r29484 83 83 @Override 84 84 public Object[] deserialize(DataInput in, int start, int end, int size) throws IOException { 85 Object[] ret = new Long[size];85 Object[] ret = new Integer[size]; 86 86 int prev = 0 ; 87 87 for(int i = start; i<end; i++){ -
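The one-word fix in deserialize() matters because Java arrays are covariant but runtime-checked: the Integer keys produced by ZERO_OR_POSITIVE_INT cannot be stored into a Long[], even though the variable is statically typed Object[]. A minimal illustration:

public class ArrayStoreDemo {
    public static void main(String[] args) {
        Object[] keys = new Long[4];   // what the old code allocated
        keys[0] = 42;                  // autoboxes to Integer -> ArrayStoreException at runtime
    }
}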
applications/editors/josm/plugins/imagerycache/src/org/mapdb/BTreeMap.java
r29363 r29484 114 114 115 115 /** holds node level locks*/ 116 protected final Lo cks.RecidLocks nodeLocks = new Locks.LongHashMapRecidLocks();116 protected final LongConcurrentHashMap<Thread> nodeLocks = new LongConcurrentHashMap<Thread>(); 117 117 118 118 /** maximal node size allowed in this BTree*/ … … 138 138 private final Values values = new Values(this); 139 139 protected final Serializer defaultSerializer; 140 protected final Atomic.Long counter; 140 141 141 142 … … 154 155 out.writeBoolean(value.valsOutsideNodes); 155 156 out.writeInt(value.maxNodeSize); 157 out.writeLong(value.counterRecid); 156 158 defaultSerializer.serialize(out, value.keySerializer); 157 159 defaultSerializer.serialize(out, value.valueSerializer); … … 168 170 ret.valsOutsideNodes = in.readBoolean(); 169 171 ret.maxNodeSize = in.readInt(); 172 ret.counterRecid = in.readLong(); 170 173 ret.keySerializer = (BTreeKeySerializer) defaultSerializer.deserialize(in, -1); 171 174 ret.valueSerializer = (Serializer) defaultSerializer.deserialize(in, -1); … … 181 184 boolean valsOutsideNodes; 182 185 int maxNodeSize; 186 long counterRecid; 183 187 BTreeKeySerializer keySerializer; 184 188 Serializer valueSerializer; 185 189 Comparator comparator; 186 187 188 189 190 } 190 191 … … 425 426 * @param comparator Comparator to sort keys in this BTree, may be null. 426 427 */ 427 public BTreeMap(Engine engine, int maxNodeSize, boolean hasValues, boolean valsOutsideNodes, 428 public BTreeMap(Engine engine, int maxNodeSize, boolean hasValues, boolean valsOutsideNodes, boolean keepCounter, 428 429 Serializer defaultSerializer, 429 430 BTreeKeySerializer<K> keySerializer, Serializer<V> valueSerializer, Comparator<K> comparator) { … … 449 450 this.valueSerializer = valueSerializer==null ? (Serializer<V>) defaultSerializer : valueSerializer; 450 451 452 451 453 this.keySet = new KeySet(this, hasValues); 452 454 … … 454 456 long rootRecidVal = engine.put(emptyRoot, nodeSerializer); 455 457 rootRecidRef = engine.put(rootRecidVal,Serializer.LONG_SERIALIZER); 458 459 long counterRecid = 0; 460 if(keepCounter){ 461 counterRecid = engine.put(0L, Serializer.LONG_SERIALIZER); 462 this.counter = new Atomic.Long(engine,counterRecid); 463 Bind.size(this,counter); 464 }else{ 465 this.counter = null; 466 } 456 467 457 468 BTreeRoot r = new BTreeRoot(); … … 463 474 r.valueSerializer = this.valueSerializer; 464 475 r.comparator = this.comparator; 476 r.counterRecid = counterRecid; 465 477 this.treeRecid = engine.put(r, new BTreeRootSerializer(this.defaultSerializer)); 478 479 466 480 } 467 481 … … 491 505 this.valsOutsideNodes = r.valsOutsideNodes; 492 506 507 493 508 this.keySet = new KeySet(this, hasValues); 509 510 if(r.counterRecid!=0){ 511 counter = new Atomic.Long(engine,r.counterRecid); 512 Bind.size(this,counter); 513 }else{ 514 this.counter = null; 515 } 494 516 } 495 517 … … 617 639 boolean found; 618 640 do{ 619 nodeLocks.lock(current);641 Utils.lock(nodeLocks, current); 620 642 found = true; 621 643 A = engine.get(current, nodeSerializer); … … 627 649 if(putOnlyIfAbsent){ 628 650 //is not absent, so quit 629 nodeLocks.unlock(current);630 nodeLocks.assertNoLocks();651 Utils.unlock(nodeLocks, current); 652 Utils.assertNoLocks(nodeLocks); 631 653 V ret = valExpand(oldVal); 632 654 notify(v,ret, value2); … … 643 665 engine.update(current, A, nodeSerializer); 644 666 //already in here 645 nodeLocks.unlock(current);646 nodeLocks.assertNoLocks();667 Utils.unlock(nodeLocks, current); 668 Utils.assertNoLocks(nodeLocks); 647 669 V ret = 
valExpand(oldVal); 648 670 notify(v,ret, value2); … … 652 674 if(A.highKey() != null && comparator.compare(v, A.highKey())>0){ 653 675 //follow link until necessary 654 nodeLocks.unlock(current);676 Utils.unlock(nodeLocks, current); 655 677 found = false; 656 678 int pos2 = findChildren(v, A.keys()); … … 686 708 } 687 709 688 nodeLocks.unlock(current);689 nodeLocks.assertNoLocks();710 Utils.unlock(nodeLocks, current); 711 Utils.assertNoLocks(nodeLocks); 690 712 notify(v, null, value2); 691 713 return null; … … 734 756 735 757 if(!isRoot){ 736 nodeLocks.unlock(current);758 Utils.unlock(nodeLocks, current); 737 759 p = q; 738 760 v = (K) A.highKey(); … … 757 779 758 780 //TODO update tree levels 759 nodeLocks.unlock(current);760 nodeLocks.assertNoLocks();781 Utils.unlock(nodeLocks, current); 782 Utils.assertNoLocks(nodeLocks); 761 783 notify(v, null, value2); 762 784 return null; … … 843 865 while(true){ 844 866 845 nodeLocks.lock(current);867 Utils.lock(nodeLocks, current); 846 868 A = engine.get(current, nodeSerializer); 847 869 int pos = findChildren(key, A.keys()); … … 852 874 oldVal = valExpand(oldVal); 853 875 if(value!=null && !value.equals(oldVal)){ 854 nodeLocks.unlock(current);876 Utils.unlock(nodeLocks, current); 855 877 return null; 856 878 } 857 879 //check for last node which was already deleted 858 880 if(pos == A.keys().length-1 && value == null){ 859 nodeLocks.unlock(current);881 Utils.unlock(nodeLocks, current); 860 882 return null; 861 883 } … … 874 896 A = new LeafNode(keys2, vals2, ((LeafNode)A).next); 875 897 engine.update(current, A, nodeSerializer); 876 nodeLocks.unlock(current);898 Utils.unlock(nodeLocks, current); 877 899 notify((K)key, (V)oldVal, null); 878 900 return (V) oldVal; 879 901 }else{ 880 nodeLocks.unlock(current);902 Utils.unlock(nodeLocks, current); 881 903 //follow link until necessary 882 904 if(A.highKey() != null && comparator.compare(key, A.highKey())>0){ … … 960 982 @Override 961 983 public int size(){ 984 if(counter!=null) 985 return (int) counter.get(); //TODO larger then MAX_INT 986 962 987 long size = 0; 963 988 BTreeIterator iter = new BTreeIterator(); … … 994 1019 } 995 1020 996 nodeLocks.lock(current);1021 Utils.lock(nodeLocks, current); 997 1022 LeafNode leaf = (LeafNode) engine.get(current, nodeSerializer); 998 1023 … … 1000 1025 while(pos==leaf.keys.length){ 1001 1026 //follow leaf link until necessary 1002 nodeLocks.lock(leaf.next);1003 nodeLocks.unlock(current);1027 Utils.lock(nodeLocks, leaf.next); 1028 Utils.unlock(nodeLocks, current); 1004 1029 current = leaf.next; 1005 1030 leaf = (LeafNode) engine.get(current, nodeSerializer); … … 1027 1052 } 1028 1053 } 1029 nodeLocks.unlock(current);1054 Utils.unlock(nodeLocks, current); 1030 1055 return ret; 1031 1056 } … … 1043 1068 } 1044 1069 1045 nodeLocks.lock(current);1070 Utils.lock(nodeLocks, current); 1046 1071 LeafNode leaf = (LeafNode) engine.get(current, nodeSerializer); 1047 1072 … … 1049 1074 while(pos==leaf.keys.length){ 1050 1075 //follow leaf link until necessary 1051 nodeLocks.lock(leaf.next);1052 nodeLocks.unlock(current);1076 Utils.lock(nodeLocks, leaf.next); 1077 Utils.unlock(nodeLocks, current); 1053 1078 current = leaf.next; 1054 1079 leaf = (LeafNode) engine.get(current, nodeSerializer); … … 1073 1098 1074 1099 } 1075 nodeLocks.unlock(current);1100 Utils.unlock(nodeLocks, current); 1076 1101 return (V)ret; 1077 1102 } -
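BTreeMap now persists an optional counterRecid in its root and, when keepCounter is set, binds an Atomic.Long to the map via Bind.size() so that size() reads the counter instead of iterating every leaf. A self-contained sketch of the idea, with java.util.concurrent classes standing in for MapDB's persisted Atomic.Long:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the keepCounter idea: maintain the size eagerly on every update
// so size() is O(1) instead of a full traversal.
class CountedMap<K, V> {
    private final ConcurrentHashMap<K, V> map = new ConcurrentHashMap<>();
    private final AtomicLong counter = new AtomicLong();

    V put(K key, V value) {
        V old = map.put(key, value);
        if (old == null) counter.incrementAndGet();  // only new keys grow the size
        return old;
    }

    V remove(K key) {
        V old = map.remove(key);
        if (old != null) counter.decrementAndGet();
        return old;
    }

    int size() {
        return (int) counter.get();   // as in the diff: may truncate above MAX_INT
    }
}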
applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheHashTable.java
r29363 r29484 17 17 package org.mapdb; 18 18 19 import java.util.concurrent.locks.ReentrantLock; 20 19 21 /** 20 22 * Fixed size cache which uses hash table. … … 29 31 30 32 31 protected final Locks.RecidLocks locks = new Locks.SegmentedRecidLocks(16);33 protected final ReentrantLock[] locks = Utils.newLocks(32); 32 34 33 35 protected HashItem[] items; … … 63 65 final int pos = position(recid); 64 66 try{ 65 locks.lock(pos);67 Utils.lock(locks,pos); 66 68 checkClosed(items)[position(recid)] = new HashItem(recid, value); 67 69 }finally{ 68 locks.unlock(pos);70 Utils.unlock(locks,pos); 69 71 } 70 72 return recid; … … 81 83 82 84 try{ 83 locks.lock(pos);85 Utils.lock(locks,pos); 84 86 //not in cache, fetch and add 85 87 final A value = getWrappedEngine().get(recid, serializer); … … 88 90 return value; 89 91 }finally{ 90 locks.unlock(pos);92 Utils.unlock(locks,pos); 91 93 } 92 94 } … … 100 102 final int pos = position(recid); 101 103 try{ 102 locks.lock(pos);104 Utils.lock(locks,pos); 103 105 checkClosed(items)[pos] = new HashItem(recid, value); 104 106 getWrappedEngine().update(recid, value, serializer); 105 107 }finally { 106 locks.unlock(pos);108 Utils.unlock(locks,pos); 107 109 } 108 110 } … … 113 115 try{ 114 116 HashItem[] items2 = checkClosed(items); 115 locks.lock(pos);117 Utils.lock(locks,pos); 116 118 HashItem item = items2[pos]; 117 119 if(item!=null && item.key == recid){ … … 131 133 } 132 134 }finally { 133 locks.unlock(pos);135 Utils.unlock(locks,pos); 134 136 } 135 137 } … … 139 141 final int pos = position(recid); 140 142 try{ 141 locks.lock(recid);143 Utils.lock(locks,pos); 142 144 getWrappedEngine().delete(recid,serializer); 143 145 HashItem[] items2 = checkClosed(items); … … 146 148 items[pos] = null; 147 149 }finally { 148 locks.unlock(recid);150 Utils.unlock(locks,pos); 149 151 } 150 152 -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheLRU.java
r29363 r29484 1 1 package org.mapdb; 2 3 import java.util.concurrent.locks.ReentrantLock; 2 4 3 5 /** … … 10 12 protected LongMap<Object> cache; 11 13 12 protected final Locks.RecidLocks locks = new Locks.SegmentedRecidLocks(16);14 protected final ReentrantLock[] locks = Utils.newLocks(32); 13 15 14 16 … … 26 28 long recid = super.put(value, serializer); 27 29 try{ 28 locks.lock(recid);30 Utils.lock(locks,recid); 29 31 checkClosed(cache).put(recid, value); 30 32 }finally { 31 locks.unlock(recid);33 Utils.unlock(locks,recid); 32 34 } 33 35 return recid; … … 40 42 if(ret!=null) return (A) ret; 41 43 try{ 42 locks.lock(recid);44 Utils.lock(locks,recid); 43 45 ret = super.get(recid, serializer); 44 46 if(ret!=null) checkClosed(cache).put(recid, ret); 45 47 return (A) ret; 46 48 }finally { 47 locks.unlock(recid);49 Utils.unlock(locks,recid); 48 50 } 49 51 } … … 52 54 public <A> void update(long recid, A value, Serializer<A> serializer) { 53 55 try{ 54 locks.lock(recid);56 Utils.lock(locks,recid); 55 57 checkClosed(cache).put(recid, value); 56 58 super.update(recid, value, serializer); 57 59 }finally { 58 locks.unlock(recid);60 Utils.unlock(locks,recid); 59 61 } 60 62 } … … 63 65 public <A> void delete(long recid, Serializer<A> serializer){ 64 66 try{ 65 locks.lock(recid);67 Utils.lock(locks,recid); 66 68 checkClosed(cache).remove(recid); 67 69 super.delete(recid,serializer); 68 70 }finally { 69 locks.unlock(recid);71 Utils.unlock(locks,recid); 70 72 } 71 73 } … … 74 76 public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) { 75 77 try{ 76 locks.lock(recid);78 Utils.lock(locks,recid); 77 79 Engine engine = getWrappedEngine(); 78 80 LongMap cache2 = checkClosed(cache); … … 89 91 } 90 92 }finally { 91 locks.unlock(recid);93 Utils.unlock(locks,recid); 92 94 } 93 95 } -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/CacheWeakSoftRef.java
r29363 r29484 20 20 import java.lang.ref.SoftReference; 21 21 import java.lang.ref.WeakReference; 22 import java.util.concurrent.locks.ReentrantLock; 22 23 23 24 /** … … 30 31 31 32 32 protected final Locks.RecidLocks locks = new Locks.LongHashMapRecidLocks();33 protected final ReentrantLock[] locks = Utils.newLocks(32); 33 34 34 35 protected interface CacheItem{ … … 130 131 131 132 try{ 132 locks.lock(recid);133 Utils.lock(locks,recid); 133 134 Object value = getWrappedEngine().get(recid, serializer); 134 135 if(value!=null) putItemIntoCache(recid, value); … … 136 137 return (A) value; 137 138 }finally{ 138 locks.unlock(recid);139 Utils.unlock(locks,recid); 139 140 } 140 141 … … 144 145 public <A> void update(long recid, A value, Serializer<A> serializer) { 145 146 try{ 146 locks.lock(recid);147 Utils.lock(locks,recid); 147 148 putItemIntoCache(recid, value); 148 149 getWrappedEngine().update(recid, value, serializer); 149 150 }finally { 150 locks.unlock(recid);151 Utils.unlock(locks,recid); 151 152 } 152 153 } … … 163 164 public <A> void delete(long recid, Serializer<A> serializer){ 164 165 try{ 165 locks.lock(recid);166 Utils.lock(locks,recid); 166 167 checkClosed(items).remove(recid); 167 168 getWrappedEngine().delete(recid,serializer); 168 169 }finally { 169 locks.unlock(recid);170 Utils.unlock(locks,recid); 170 171 } 171 172 … … 175 176 public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) { 176 177 try{ 177 locks.lock(recid);178 Utils.lock(locks,recid); 178 179 CacheItem item = checkClosed(items).get(recid); 179 180 Object oldValue = item==null? null: item.get() ; … … 190 191 } 191 192 }finally { 192 locks.unlock(recid);193 Utils.unlock(locks,recid); 193 194 } 194 195 } -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/CompressLZF.java
r29363 r29484 53 53 import java.io.DataOutput; 54 54 import java.io.IOException; 55 import java.io.Serializable;56 55 import java.util.Arrays; 57 56 … … 297 296 @Override 298 297 public void serialize(DataOutput out, byte[] value) throws IOException { 299 if (value == null) return; 298 if (value == null|| value.length==0){ 299 //in this case do not compress data, write 0 as indicator 300 Utils.packInt(out, 0); 301 out.write(value); 302 return; 303 } 300 304 301 305 CompressLZF lzf = LZF.get(); … … 341 345 * Wraps existing serializer and compresses its input/output 342 346 */ 343 public static <E> Serializer<E> serializerCompressWrapper(Serializer<E> serializer) { 344 return new SerializerCompressWrapper<E>(serializer); 345 } 346 347 348 protected static class SerializerCompressWrapper<E> implements Serializer<E>, Serializable { 349 protected final Serializer<E> serializer; 350 public SerializerCompressWrapper(Serializer<E> serializer) { 351 this.serializer = serializer; 352 } 353 354 @Override 355 public void serialize(DataOutput out, E value) throws IOException { 356 //serialize to byte[] 357 DataOutput2 out2 = new DataOutput2(); 358 serializer.serialize(out2, value); 359 byte[] b = out2.copyBytes(); 360 CompressLZF.SERIALIZER.serialize(out, b); 361 } 362 363 @Override 364 public E deserialize(DataInput in, int available) throws IOException { 365 byte[] b = CompressLZF.SERIALIZER.deserialize(in, available); 366 DataInput2 in2 = new DataInput2(b); 367 return serializer.deserialize(in2, b.length); 368 } 369 } 347 public static <E> Serializer<E> CompressionWrapper(Serializer<E> serializer) { 348 return new Serializer.CompressSerializerWrapper<E>(serializer); 349 } 350 370 351 371 352 } -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/DB.java
r29363 r29484 83 83 }else{ 84 84 //create new map 85 ret = new HTreeMap<K,V>(engine,true, Utils.RANDOM.nextInt(), defaultSerializer,null, null);85 ret = new HTreeMap<K,V>(engine,true,false,Utils.RANDOM.nextInt(), defaultSerializer,null, null); 86 86 nameDir.put(name, ret.rootRecid); 87 87 } … … 95 95 * 96 96 * @param name of map to create 97 * @param keepCounter if counter should be kept, without counter updates are faster, but entire collection needs to be traversed to count items. 97 98 * @param keySerializer used to convert keys into/from binary form. Use null for default value. 98 99 * @param valueSerializer used to convert values into/from binary form. Use null for default value. … … 103 104 */ 104 105 synchronized public <K,V> HTreeMap<K,V> createHashMap( 105 String name, Serializer<K> keySerializer, Serializer<V> valueSerializer){106 checkNameNotExists(name); 107 HTreeMap<K,V> ret = new HTreeMap<K,V>(engine, true, Utils.RANDOM.nextInt(), defaultSerializer, keySerializer, valueSerializer);106 String name, boolean keepCounter, Serializer<K> keySerializer, Serializer<V> valueSerializer){ 107 checkNameNotExists(name); 108 HTreeMap<K,V> ret = new HTreeMap<K,V>(engine, true,keepCounter,Utils.RANDOM.nextInt(), defaultSerializer, keySerializer, valueSerializer); 108 109 nameDir.put(name, ret.rootRecid); 109 110 collections.put(name, new WeakReference<Object>(ret)); … … 130 131 }else{ 131 132 //create new map 132 HTreeMap<K,Object> m = new HTreeMap<K,Object>(engine, false, Utils.RANDOM.nextInt(), defaultSerializer, null, null);133 HTreeMap<K,Object> m = new HTreeMap<K,Object>(engine, false,false, Utils.RANDOM.nextInt(), defaultSerializer, null, null); 133 134 ret = m.keySet(); 134 135 nameDir.put(name, m.rootRecid); … … 142 143 * Creates new HashSet 143 144 * @param name of set to create 145 * @param keepCounter if counter should be kept, without counter updates are faster, but entire collection needs to be traversed to count items. 144 146 * @param serializer used to convert keys into/from binary form. Use null for default value. 145 147 * @param <K> item type 146 148 * @throws IllegalArgumentException if name is already used 147 148 149 */ 149 150 150 synchronized public <K> Set<K> createHashSet(String name, Serializer<K> serializer){151 checkNameNotExists(name); 152 HTreeMap<K,Object> ret = new HTreeMap<K,Object>(engine, false, Utils.RANDOM.nextInt(), defaultSerializer, serializer, null);151 synchronized public <K> Set<K> createHashSet(String name, boolean keepCounter, Serializer<K> serializer){ 152 checkNameNotExists(name); 153 HTreeMap<K,Object> ret = new HTreeMap<K,Object>(engine, false,keepCounter,Utils.RANDOM.nextInt(), defaultSerializer, serializer, null); 153 154 nameDir.put(name, ret.rootRecid); 154 155 Set<K> ret2 = ret.keySet(); … … 180 181 }else{ 181 182 //create new map 182 ret = new BTreeMap<K,V>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE, true, false, defaultSerializer, null, null, null);183 ret = new BTreeMap<K,V>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE, true, false,false, defaultSerializer, null, null, null); 183 184 nameDir.put(name, ret.treeRecid); 184 185 } … … 192 193 * @param nodeSize maximal size of node, larger node causes overflow and creation of new BTree node. Use large number for small keys, use small number for large keys. 193 194 * @param valuesStoredOutsideNodes if true, values are stored outside of BTree nodes. Use 'true' if your values are large. 
195 * @param keepCounter if counter should be kept, without counter updates are faster, but entire collection needs to be traversed to count items. 194 196 * @param keySerializer used to convert keys into/from binary form. Use null for default value. 195 197 * @param valueSerializer used to convert values into/from binary form. Use null for default value. … … 201 203 */ 202 204 synchronized public <K,V> BTreeMap<K,V> createTreeMap( 203 String name, int nodeSize, boolean valuesStoredOutsideNodes, 205 String name, int nodeSize, boolean valuesStoredOutsideNodes, boolean keepCounter, 204 206 BTreeKeySerializer<K> keySerializer, Serializer<V> valueSerializer, Comparator<K> comparator){ 205 207 checkNameNotExists(name); 206 BTreeMap<K,V> ret = new BTreeMap<K,V>(engine, nodeSize, true,valuesStoredOutsideNodes, defaultSerializer, keySerializer, valueSerializer, comparator);208 BTreeMap<K,V> ret = new BTreeMap<K,V>(engine, nodeSize, true,valuesStoredOutsideNodes, keepCounter,defaultSerializer, keySerializer, valueSerializer, comparator); 207 209 nameDir.put(name, ret.treeRecid); 208 210 collections.put(name, new WeakReference<Object>(ret)); … … 240 242 //create new map 241 243 BTreeMap<K,Object> m = new BTreeMap<K,Object>(engine,BTreeMap.DEFAULT_MAX_NODE_SIZE, 242 false, false, defaultSerializer, null, null, null);244 false, false,false, defaultSerializer, null, null, null); 243 245 nameDir.put(name, m.treeRecid); 244 246 ret = m.keySet(); … … 253 255 * @param name of set to create 254 256 * @param nodeSize maximal size of node, larger node causes overflow and creation of new BTree node. Use large number for small keys, use small number for large keys. 257 * @param keepCounter if counter should be kept, without counter updates are faster, but entire collection needs to be traversed to count items. 255 258 * @param serializer used to convert keys into/from binary form. Use null for default value. 256 259 * @param comparator used to sort keys. Use null for default value. TODO delta packing … … 259 262 * @return 260 263 */ 261 synchronized public <K> NavigableSet<K> createTreeSet(String name, int nodeSize, BTreeKeySerializer<K> serializer, Comparator<K> comparator){262 checkNameNotExists(name); 263 BTreeMap<K,Object> ret = new BTreeMap<K,Object>(engine, nodeSize, false, false, defaultSerializer, serializer, null, comparator);264 synchronized public <K> NavigableSet<K> createTreeSet(String name,int nodeSize, boolean keepCounter, BTreeKeySerializer<K> serializer, Comparator<K> comparator){ 265 checkNameNotExists(name); 266 BTreeMap<K,Object> ret = new BTreeMap<K,Object>(engine, nodeSize, false, false, keepCounter, defaultSerializer, serializer, null, comparator); 264 267 nameDir.put(name, ret.treeRecid); 265 268 NavigableSet<K> ret2 = ret.keySet(); -
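For reference, a hypothetical caller of the new keepCounter parameter, using the createHashMap/createTreeSet signatures as shown in this diff (the trade-off: counted collections pay one extra record update per insert/remove, uncounted ones must traverse the whole collection to answer size()):

import java.io.File;
import java.util.NavigableSet;
import org.mapdb.*;

public class KeepCounterDemo {
    public static void main(String[] args) {
        DB db = DBMaker.newFileDB(new File("demo.db")).closeOnJvmShutdown().make();

        // keepCounter = true: size() is O(1), reading a persisted counter
        HTreeMap<String, String> counted = db.createHashMap("counted", true, null, null);

        // keepCounter = false: faster updates, size() walks the collection
        NavigableSet<String> plain =
                db.createTreeSet("plain", BTreeMap.DEFAULT_MAX_NODE_SIZE, false, null, null);
    }
}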
applications/editors/josm/plugins/imagerycache/src/org/mapdb/DBMaker.java
r29363 r29484 60 60 protected byte[] _xteaEncryptionKey = null; 61 61 62 protected boolean _freeSpaceReclaimDisabled = false;62 protected int _freeSpaceReclaimQ = 5; 63 63 64 64 protected boolean _checksumEnabled = false; … … 124 124 .deleteFilesAfterClose() 125 125 .closeOnJvmShutdown() 126 . journalDisable()126 .writeAheadLogDisable() 127 127 .make() 128 128 .getTreeMap("temp"); … … 139 139 .deleteFilesAfterClose() 140 140 .closeOnJvmShutdown() 141 . journalDisable()141 .writeAheadLogDisable() 142 142 .make() 143 143 .getHashMap("temp"); … … 154 154 .deleteFilesAfterClose() 155 155 .closeOnJvmShutdown() 156 . journalDisable()156 .writeAheadLogDisable() 157 157 .make() 158 158 .getTreeSet("temp"); … … 169 169 .deleteFilesAfterClose() 170 170 .closeOnJvmShutdown() 171 . journalDisable()171 .writeAheadLogDisable() 172 172 .make() 173 173 .getHashSet("temp"); … … 210 210 * @return this builder 211 211 */ 212 public DBMaker journalDisable(){212 public DBMaker writeAheadLogDisable(){ 213 213 this._journalEnabled = false; 214 214 return this; … … 485 485 486 486 /** 487 * In this mode existing free space is not reused, 488 * but records are added to the end of the store. 489 * <p/> 490 * This slightly improves write performance as store does not have 491 * to traverse list of free records to find and reuse existing position. 492 * <p/> 493 * It also decreases chance for store corruption, as existing data 494 * are not overwritten with new record. 495 * <p/> 496 * When this mode is used for longer time, store becomes fragmented. 497 * It is necessary to run defragmentation then. 498 * <p/> 499 * NOTE: this mode is not append-only, just small setting for update-in-place storage. 500 * 501 * 502 * @return this builder 503 */ 504 public DBMaker freeSpaceReclaimDisable(){ 505 this._freeSpaceReclaimDisabled = true; 487 * Set free space reclaim Q. It is value from 0 to 10, indicating how eagerly MapDB 488 * searchs for free space inside store to reuse, before expanding store file. 489 * 0 means that no free space will be reused and store file will just grow (effectively append only). 490 * 10 means that MapDB tries really hard to reuse free space, even if it may hurt performance. 491 * Default value is 5; 492 * 493 * 494 * @return this builder 495 */ 496 public DBMaker freeSpaceReclaimQ(int q){ 497 if(q<0||q>10) throw new IllegalArgumentException("wrong Q"); 498 this._freeSpaceReclaimQ = q; 506 499 return this; 507 500 } … … 536 529 * @return this builder 537 530 */ 538 public DBMaker powerSavingModeEnable(){539 this._powerSavingMode = true;540 return this;541 }531 // public DBMaker powerSavingModeEnable(){ 532 // this._powerSavingMode = true; 533 // return this; 534 // } 542 535 543 536 … … 559 552 throw new UnsupportedOperationException("Can not open in-memory DB in read-only mode."); 560 553 561 if(_readOnly && !_file.exists() ){554 if(_readOnly && !_file.exists() && !_appendStorage){ 562 555 throw new UnsupportedOperationException("Can not open non-existing file in read-only mode."); 563 556 } … … 571 564 572 565 engine = _journalEnabled ? 
573 new StorageJournaled(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose, _failOnWrongHeader, _readOnly): 574 new StorageDirect(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose , _failOnWrongHeader, _readOnly); 566 //TODO add extra params 567 //new StoreWAL(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose, _failOnWrongHeader, _readOnly): 568 //new StoreDirect(folFac, _freeSpaceReclaimDisabled, _deleteFilesAfterClose , _failOnWrongHeader, _readOnly); 569 new StoreWAL(folFac, _readOnly,_deleteFilesAfterClose): 570 new StoreDirect(folFac, _readOnly,_deleteFilesAfterClose); 575 571 }else{ 576 572 if(_file==null) throw new UnsupportedOperationException("Append Storage format is not supported with in-memory dbs"); 577 engine = new StorageAppend(_file, _RAF, _readOnly, !_journalEnabled); 578 } 573 engine = new StoreAppend(_file, _RAF, _readOnly, !_journalEnabled); 574 } 575 576 if(_checksumEnabled){ 577 engine = new ByteTransformEngine(engine, Serializer.CRC32_CHECKSUM); 578 } 579 580 if(_xteaEncryptionKey!=null){ 581 engine = new ByteTransformEngine(engine, new EncryptionXTEA(_xteaEncryptionKey)); 582 } 583 584 585 if(_compressionEnabled){ 586 engine = new ByteTransformEngine(engine, CompressLZF.SERIALIZER); 587 } 588 579 589 580 590 AsyncWriteEngine engineAsync = null; … … 584 594 } 585 595 586 if(_checksumEnabled){587 engine = new ByteTransformEngine(engine, Serializer.CRC32_CHECKSUM);588 }589 590 if(_xteaEncryptionKey!=null){591 engine = new ByteTransformEngine(engine, new EncryptionXTEA(_xteaEncryptionKey));592 }593 594 595 if(_compressionEnabled){596 engine = new ByteTransformEngine(engine, CompressLZF.SERIALIZER);597 }598 596 599 597 engine = new SnapshotEngine(engine); … … 625 623 @Override 626 624 public void run() { 625 626 // for JOSM plugin ImageryCache 627 org.openstreetmap.josm.plugins.imagerycache.TileDAOMapDB.dbNotAvailable = true; 627 628 if(!engine2.isClosed()) 628 629 engine2.close(); -
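The builder rename and the new reclaim knob, as they would be used together (a sketch based only on the methods shown in this diff; journalDisable() became writeAheadLogDisable(), and the all-or-nothing freeSpaceReclaimDisable() became a 0-10 eagerness value):

import java.io.File;
import org.mapdb.DB;
import org.mapdb.DBMaker;

public class BuilderDemo {
    public static void main(String[] args) {
        DB db = DBMaker.newFileDB(new File("demo.db"))
                .writeAheadLogDisable()   // was journalDisable()
                .freeSpaceReclaimQ(0)     // 0 = never reuse free space, store only grows
                .make();
    }
}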
applications/editors/josm/plugins/imagerycache/src/org/mapdb/EngineWrapper.java
r29363 r29484 378 378 } 379 379 } 380 381 382 /** Engine wrapper with all methods synchronized on global lock, useful to diagnose concurrency issues.*/ 383 public static class SynchronizedEngineWrapper extends EngineWrapper{ 384 385 protected SynchronizedEngineWrapper(Engine engine) { 386 super(engine); 387 } 388 389 @Override 390 synchronized public <A> long put(A value, Serializer<A> serializer) { 391 return super.put(value, serializer); 392 } 393 394 @Override 395 synchronized public <A> A get(long recid, Serializer<A> serializer) { 396 return super.get(recid, serializer); 397 } 398 399 @Override 400 synchronized public <A> void update(long recid, A value, Serializer<A> serializer) { 401 super.update(recid, value, serializer); 402 } 403 404 @Override 405 synchronized public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) { 406 return super.compareAndSwap(recid, expectedOldValue, newValue, serializer); 407 } 408 409 @Override 410 synchronized public <A> void delete(long recid, Serializer<A> serializer) { 411 super.delete(recid, serializer); 412 } 413 414 @Override 415 synchronized public void close() { 416 super.close(); 417 } 418 419 @Override 420 synchronized public boolean isClosed() { 421 return super.isClosed(); 422 } 423 424 @Override 425 synchronized public void commit() { 426 super.commit(); 427 } 428 429 @Override 430 synchronized public void rollback() { 431 super.rollback(); 432 } 433 434 @Override 435 synchronized public boolean isReadOnly() { 436 return super.isReadOnly(); 437 } 438 439 @Override 440 synchronized public void compact() { 441 super.compact(); 442 } 443 } 380 444 381 445 } -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/HTreeMap.java
r29363 r29484 52 52 protected final int hashSalt; 53 53 54 protected final Atomic.Long counter; 54 55 55 56 protected final Serializer<K> keySerializer; … … 86 87 out.writeBoolean(value.hasValues); 87 88 out.writeInt(value.hashSalt); 89 out.writeLong(value.counterRecid); 88 90 for(int i=0;i<16;i++){ 89 91 Utils.packLong(out, value.segmentRecids[i]); … … 100 102 r.hasValues = in.readBoolean(); 101 103 r.hashSalt = in.readInt(); 104 r.counterRecid = in.readLong(); 102 105 r.segmentRecids = new long[16]; 103 106 for(int i=0;i<16;i++){ … … 116 119 boolean hasValues; 117 120 int hashSalt; 121 long counterRecid; 118 122 Serializer keySerializer; 119 123 Serializer valueSerializer; 124 120 125 } 121 126 … … 212 217 * @param valueSerializer Serializer used for values. May be null for default value 213 218 */ 214 public HTreeMap(Engine engine, boolean hasValues, int hashSalt, Serializer defaultSerializer, Serializer<K> keySerializer, Serializer<V> valueSerializer) {219 public HTreeMap(Engine engine, boolean hasValues, boolean keepCounter, int hashSalt, Serializer defaultSerializer, Serializer<K> keySerializer, Serializer<V> valueSerializer) { 215 220 this.engine = engine; 216 221 this.hasValues = hasValues; 217 222 this.hashSalt = hashSalt; 223 224 218 225 SerializerBase.assertSerializable(keySerializer); 219 226 SerializerBase.assertSerializable(valueSerializer); 227 220 228 221 229 if(defaultSerializer == null) defaultSerializer = Serializer.BASIC_SERIALIZER; … … 229 237 for(int i=0;i<16;i++) 230 238 segmentRecids[i] = engine.put(new long[16][], DIR_SERIALIZER); 239 240 long counterRecid = 0; 241 if(keepCounter){ 242 counterRecid = engine.put(0L, Serializer.LONG_SERIALIZER); 243 this.counter = new Atomic.Long(engine,counterRecid); 244 Bind.size(this,counter); 245 }else{ 246 this.counter = null; 247 } 248 231 249 HashRoot r = new HashRoot(); 232 250 r.hasValues = hasValues; 233 251 r.hashSalt = hashSalt; 252 r.counterRecid = counterRecid; 234 253 r.segmentRecids = segmentRecids; 235 254 r.keySerializer = this.keySerializer; 236 255 r.valueSerializer = this.valueSerializer; 237 256 this.rootRecid = engine.put(r, new HashRootSerializer(defaultSerializer)); 257 238 258 } 239 259 … … 259 279 this.keySerializer = r.keySerializer; 260 280 this.valueSerializer = r.valueSerializer; 281 282 if(r.counterRecid!=0){ 283 counter = new Atomic.Long(engine,r.counterRecid); 284 Bind.size(this,counter); 285 }else{ 286 this.counter = null; 287 } 261 288 } 262 289 … … 294 321 @Override 295 322 public int size() { 323 if(counter!=null) 324 return (int) counter.get(); //TODO larger then MAX_INT 325 326 296 327 long counter = 0; 297 328 -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/Queues.java
r29363 r29484 1 1 package org.mapdb; 2 3 4 2 5 3 import java.io.DataInput; … … 13 11 14 12 /** 15 * Various queue salgorithms13 * Various queue algorithms 16 14 */ 17 15 public final class Queues { … … 107 105 public void clear() { 108 106 while(!isEmpty()) 109 remove();107 poll(); 110 108 } 111 109 … … 124 122 if(ret == null) throw new NoSuchElementException(); 125 123 return ret; 126 127 124 } 128 125 … … 132 129 return add(e); 133 130 } 134 135 131 136 132 … … 202 198 203 199 protected final boolean useLocks; 204 protected final Locks.RecidLocks locks; 205 200 protected final ReentrantLock[] locks; 206 201 207 202 … … 209 204 super(engine, serializer, headerRecid); 210 205 this.useLocks = useLocks; 211 locks = useLocks? new Locks.LongHashMapRecidLocks() : null;206 locks = useLocks? Utils.newLocks(32) : null; 212 207 } 213 208 … … 229 224 Node<E> n; 230 225 do{ 231 if(useLocks && head2!=0) locks.unlock(head2);226 if(useLocks && head2!=0)Utils.lock(locks,head2); 232 227 head2 =head.get(); 233 228 if(head2 == 0) return null; 234 229 235 if(useLocks && head2!=0) locks.lock(head2);230 if(useLocks && head2!=0)Utils.lock(locks,head2); 236 231 n = engine.get(head2, nodeSerializer); 237 232 }while(n==null || !head.compareAndSet(head2, n.next)); 238 233 if(useLocks && head2!=0){ 239 234 engine.delete(head2,Serializer.LONG_SERIALIZER); 240 locks.unlock(head2);235 Utils.unlock(locks,head2); 241 236 }else{ 242 237 engine.update(head2, null, nodeSerializer); … … 328 323 } 329 324 330 331 @Override 332 public boolean isEmpty() { 333 return head.get() == 0; 334 } 335 325 @Override 336 326 public boolean add(E item){ 337 327 final long nextTail = engine.put((Node<E>)Node.EMPTY, nodeSerializer); … … 347 337 } 348 338 339 @Override 349 340 public E poll(){ 350 341 while(true){ … … 472 463 473 464 @Override 465 public void clear() { 466 // praise locking 467 lock.lock(); 468 try { 469 for (int i = 0; i < size; i++) { 470 poll(); 471 } 472 } finally { 473 lock.unlock(); 474 } 475 } 476 477 @Override 474 478 public E poll() { 475 479 lock.lock(); … … 569 573 } 570 574 571 572 575 } -
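The Queues changes drain with poll() rather than remove() (remove() throws NoSuchElementException on an empty queue, which a racing consumer could trigger mid-clear), and the bounded queue's new clear() holds its lock and polls a fixed number of times. A standalone sketch of that bounded clear:

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of the two clear() strategies in the diff: the unbounded queue drains
// with poll() (null on empty, no exception), while the bounded queue holds its
// lock and polls exactly `size` times.
class BoundedClearSketch<E> {
    private final ReentrantLock lock = new ReentrantLock();
    private final Queue<E> queue = new ArrayDeque<>();

    void clear() {
        lock.lock();
        try {
            int n = queue.size();
            for (int i = 0; i < n; i++) queue.poll();
        } finally {
            lock.unlock();
        }
    }
}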
applications/editors/josm/plugins/imagerycache/src/org/mapdb/Serializer.java
r29363 r29484 20 20 import java.io.DataOutput; 21 21 import java.io.IOException; 22 import java.io.Serializable; 22 23 import java.util.zip.CRC32; 23 24 … … 148 149 */ 149 150 150 public static finalSerializer<byte[]> CRC32_CHECKSUM = new Serializer<byte[]>() {151 Serializer<byte[]> CRC32_CHECKSUM = new Serializer<byte[]>() { 151 152 @Override 152 153 public void serialize(DataOutput out, byte[] value) throws IOException { … … 174 175 175 176 177 Serializer<byte[] > BYTE_ARRAY_SERIALIZER = new Serializer<byte[]>() { 178 179 @Override 180 public void serialize(DataOutput out, byte[] value) throws IOException { 181 out.write(value); 182 } 183 184 @Override 185 public byte[] deserialize(DataInput in, int available) throws IOException { 186 byte[] ret = new byte[available]; 187 in.readFully(ret); 188 return ret; 189 } 190 } ; 191 192 193 class CompressSerializerWrapper<E> implements Serializer<E>, Serializable { 194 protected final Serializer<E> serializer; 195 public CompressSerializerWrapper(Serializer<E> serializer) { 196 this.serializer = serializer; 197 } 198 199 @Override 200 public void serialize(DataOutput out, E value) throws IOException { 201 //serialize to byte[] 202 DataOutput2 out2 = new DataOutput2(); 203 serializer.serialize(out2, value); 204 byte[] b = out2.copyBytes(); 205 CompressLZF.SERIALIZER.serialize(out, b); 206 } 207 208 @Override 209 public E deserialize(DataInput in, int available) throws IOException { 210 byte[] b = CompressLZF.SERIALIZER.deserialize(in, available); 211 DataInput2 in2 = new DataInput2(b); 212 return serializer.deserialize(in2, b.length); 213 } 214 } 176 215 } 177 216 -
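The compression wrapper moved out of CompressLZF into Serializer (as CompressSerializerWrapper) and the factory was renamed; usage after this change looks roughly like the following, using the BYTE_ARRAY_SERIALIZER defined in this same diff:

import org.mapdb.CompressLZF;
import org.mapdb.Serializer;

public class CompressionDemo {
    public static void main(String[] args) {
        // values are serialized to byte[] first, then LZF-compressed
        Serializer<byte[]> compressed =
                CompressLZF.CompressionWrapper(Serializer.BYTE_ARRAY_SERIALIZER);
    }
}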
applications/editors/josm/plugins/imagerycache/src/org/mapdb/SerializerBase.java
r29363 r29484 34 34 35 35 36 static final Set knownSerializable = new HashSet(Arrays.asList( 36 static final class knownSerializable{ 37 static final Set get = new HashSet(Arrays.asList( 37 38 BTreeKeySerializer.STRING, 38 39 BTreeKeySerializer.ZERO_OR_POSITIVE_LONG, 39 40 BTreeKeySerializer.ZERO_OR_POSITIVE_INT, 40 41 41 Utils.COMPARABLE_COMPARATOR, Utils.COMPARABLE_COMPARATOR_WITH_NULLS, 42 42 … … 44 44 Serializer.EMPTY_SERIALIZER, Serializer.BASIC_SERIALIZER, Serializer.CRC32_CHECKSUM 45 45 )); 46 } 46 47 47 48 public static void assertSerializable(Object o){ 48 49 if(o!=null && !(o instanceof Serializable) 49 && !knownSerializable. contains(o)){50 && !knownSerializable.get.contains(o)){ 50 51 throw new IllegalArgumentException("Not serializable: "+o.getClass()); 51 52 } … … 55 56 * Utility class similar to ArrayList, but with fast identity search. 56 57 */ 57 final static class FastArrayList<K> {58 protected final static class FastArrayList<K> { 58 59 59 60 private int size = 0; … … 303 304 if(((BTreeKeySerializer.BasicKeySerializer)obj).defaultSerializer!=this) throw new InternalError(); 304 305 return; 305 } else if(clazz == Compress LZF.SerializerCompressWrapper.class){306 } else if(clazz == CompressSerializerWrapper.class){ 306 307 out.write(SERIALIZER_COMPRESSION_WRAPPER); 307 serialize(out, ((Compress LZF.SerializerCompressWrapper)obj).serializer, objectStack);308 serialize(out, ((CompressSerializerWrapper)obj).serializer, objectStack); 308 309 return; 309 310 … … 1111 1112 break; 1112 1113 case SERIALIZER_COMPRESSION_WRAPPER: 1113 ret = CompressLZF. serializerCompressWrapper((Serializer) deserialize(is, objectStack));1114 ret = CompressLZF.CompressionWrapper((Serializer) deserialize(is, objectStack)); 1114 1115 break; 1115 1116 default: -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/SnapshotEngine.java
r29363 r29484 3 3 import java.util.Map; 4 4 import java.util.concurrent.ConcurrentHashMap; 5 import java.util.concurrent.locks.ReentrantLock; 5 6 import java.util.concurrent.locks.ReentrantReadWriteLock; 6 7 … … 15 16 public class SnapshotEngine extends EngineWrapper{ 16 17 17 protected final Locks.RecidLocks locks = new Locks.LongHashMapRecidLocks();18 protected final ReentrantLock[] locks = Utils.newLocks(32); 18 19 19 20 protected final static Object NOT_EXIST = new Object(); … … 38 39 public <A> long put(A value, Serializer<A> serializer) { 39 40 long recid = super.put(value, serializer); 40 locks.lock(recid);41 Utils.lock(locks,recid); 41 42 try{ 42 43 for(Snapshot s:snapshots.keySet()){ … … 45 46 return recid; 46 47 }finally{ 47 locks.unlock(recid);48 Utils.unlock(locks,recid); 48 49 } 49 50 } … … 51 52 @Override 52 53 public <A> boolean compareAndSwap(long recid, A expectedOldValue, A newValue, Serializer<A> serializer) { 53 locks.lock(recid);54 Utils.lock(locks,recid); 54 55 try{ 55 56 boolean ret = super.compareAndSwap(recid, expectedOldValue, newValue, serializer); … … 61 62 return ret; 62 63 }finally{ 63 locks.unlock(recid);64 Utils.unlock(locks,recid); 64 65 } 65 66 } … … 67 68 @Override 68 69 public <A> void update(long recid, A value, Serializer<A> serializer) { 69 locks.lock(recid);70 Utils.lock(locks,recid); 70 71 try{ 71 72 Object val = NOT_INIT_YET; … … 80 81 super.update(recid, value, serializer); 81 82 }finally{ 82 locks.unlock(recid);83 Utils.unlock(locks,recid); 83 84 } 84 85 } … … 86 87 @Override 87 88 public <A> void delete(long recid, Serializer<A> serializer) { 88 locks.lock(recid);89 Utils.lock(locks,recid); 89 90 try{ 90 91 Object val = NOT_INIT_YET; … … 99 100 super.delete(recid,serializer); 100 101 }finally{ 101 locks.unlock(recid);102 Utils.unlock(locks,recid); 102 103 } 103 104 } … … 131 132 @Override 132 133 public <A> A get(long recid, Serializer<A> serializer) { 133 locks.lock(recid);134 Utils.lock(locks,recid); 134 135 try{ 135 136 Object ret = oldValues.get(recid); … … 140 141 return SnapshotEngine.this.getWrappedEngine().get(recid, serializer); 141 142 }finally{ 142 locks.unlock(recid);143 Utils.unlock(locks,recid); 143 144 } 144 145 } -
applications/editors/josm/plugins/imagerycache/src/org/mapdb/Utils.java
r29363 r29484 21 21 import java.util.*; 22 22 import java.util.concurrent.atomic.AtomicLong; 23 import java.util.concurrent.locks.LockSupport; 24 import java.util.concurrent.locks.ReentrantLock; 25 import java.util.concurrent.locks.ReentrantReadWriteLock; 23 26 import java.util.logging.Logger; 24 27 … … 188 191 File index = File.createTempFile("mapdb","db"); 189 192 index.deleteOnExit(); 190 new File(index.getPath()+ StorageDirect.DATA_FILE_EXT).deleteOnExit();191 new File(index.getPath()+ Stor ageJournaled.TRANS_LOG_FILE_EXT).deleteOnExit();193 new File(index.getPath()+ StoreDirect.DATA_FILE_EXT).deleteOnExit(); 194 new File(index.getPath()+ StoreWAL.TRANS_LOG_FILE_EXT).deleteOnExit(); 192 195 193 196 return index; … … 233 236 } 234 237 235 public static void print er(final AtomicLong value){236 new Thread("print er"){238 public static void printProgress(final AtomicLong value){ 239 new Thread("printProgress"){ 237 240 { 238 241 setDaemon(true); 239 242 } 240 241 243 242 244 @Override 243 245 public void run() { 244 246 long startValue = value.get(); 245 long startTime = System.currentTimeMillis(); 247 long startTime, time = System.currentTimeMillis(); 248 startTime = time; 246 249 long old = value.get(); 247 250 while(true){ 248 249 try { 250 Thread.sleep(1000); 251 } catch (InterruptedException e) { 251 time+=1000; 252 while(time>System.currentTimeMillis()){ 253 LockSupport.parkNanos(1000*1000); //1ms 254 } 255 256 long current = value.get(); 257 if(current<0){ 258 System.out.println("Finished, total time: "+(time-startTime)+", aprox items: "+old); 252 259 return; 253 260 } 254 255 long current = value.get(); 256 long totalSpeed = 1000*(current-startValue)/(System.currentTimeMillis()-startTime); 261 long totalSpeed = 1000*(current-startValue)/(time-startTime); 257 262 System.out.print("total: "+current+" - items per last second: "+(current-old)+" - avg items per second: "+totalSpeed+"\r"); 258 263 old = current; … … 263 268 } 264 269 270 public static <A> DataOutput2 serializer(Serializer<A> serializer, A value) { 271 try{ 272 DataOutput2 out = new DataOutput2(); 273 serializer.serialize(out,value); 274 return out; 275 }catch(IOException e){ 276 throw new IOError(e); 277 } 278 279 } 280 281 public static String randomString(int size) { 282 String chars = "0123456789abcdefghijklmnopqrstuvwxyz !@#$%^&*()_+=-{}[]:\",./<>?|\\"; 283 StringBuilder b = new StringBuilder(size); 284 for(int i=0;i<size;i++){ 285 b.append(chars.charAt(RANDOM.nextInt(chars.length()))); 286 } 287 return b.toString(); 288 } 289 290 public static ReentrantReadWriteLock[] newReadWriteLocks(int size) { 291 ReentrantReadWriteLock[] locks = new ReentrantReadWriteLock[size]; 292 for(int i=0;i<locks.length;i++) locks[i] = new ReentrantReadWriteLock(); 293 return locks; 294 } 295 296 public static ReentrantLock[] newLocks(int size) { 297 ReentrantLock[] locks = new ReentrantLock[size]; 298 for(int i=0;i<locks.length;i++) locks[i] = new ReentrantLock(); 299 return locks; 300 } 301 302 public static void lock(ReentrantLock[] locks, long recid) { 303 locks[Utils.longHash(recid)%locks.length].lock(); 304 } 305 306 public static void lockAll(ReentrantLock[] locks) { 307 for(ReentrantLock lock:locks)lock.lock(); 308 } 309 310 public static void unlockAll(ReentrantLock[] locks) { 311 for(ReentrantLock lock:locks)lock.unlock(); 312 } 313 314 315 public static void unlock(ReentrantLock[] locks, long recid) { 316 locks[Utils.longHash(recid)%locks.length].unlock(); 317 } 318 319 320 public static void readLock(ReentrantReadWriteLock[] 
locks, long recid) { 321 locks[Utils.longHash(recid)%locks.length].readLock().lock(); 322 } 323 324 public static void readUnlock(ReentrantReadWriteLock[] locks, long recid) { 325 locks[Utils.longHash(recid)%locks.length].readLock().unlock(); 326 } 327 328 public static void writeLock(ReentrantReadWriteLock[] locks, long recid) { 329 locks[Utils.longHash(recid)%locks.length].writeLock().lock(); 330 } 331 332 public static void writeUnlock(ReentrantReadWriteLock[] locks, long recid) { 333 locks[Utils.longHash(recid)%locks.length].writeLock().unlock(); 334 } 335 336 public static void writeLockAll(ReentrantReadWriteLock[] locks) { 337 for(ReentrantReadWriteLock l:locks) l.writeLock().lock(); 338 } 339 340 public static void writeUnlockAll(ReentrantReadWriteLock[] locks) { 341 for(ReentrantReadWriteLock l:locks) l.writeLock().unlock(); 342 } 343 344 345 public static void lock(LongConcurrentHashMap<Thread> locks, long recid){ 346 //feel free to rewrite, if you know better (more efficient) way 347 if(locks.get(recid)==Thread.currentThread()){ 348 //check node is not already locked by this thread 349 throw new InternalError("node already locked by current thread: "+recid); 350 } 351 352 while(locks.putIfAbsent(recid, Thread.currentThread()) != null){ 353 LockSupport.parkNanos(10); 354 } 355 } 356 357 358 359 public static void unlock(LongConcurrentHashMap<Thread> locks,final long recid) { 360 final Thread t = locks.remove(recid); 361 if(t!=Thread.currentThread()) 362 throw new InternalError("unlocked wrong thread"); 363 364 } 365 366 public static void assertNoLocks(LongConcurrentHashMap<Thread> locks){ 367 if(CC.PARANOID){ 368 LongMap.LongMapIterator<Thread> i = locks.longMapIterator(); 369 while(i.moveToNext()){ 370 if(i.value()==Thread.currentThread()){ 371 throw new InternalError("Node "+i.key()+" is still locked"); 372 } 373 } 374 } 375 } 265 376 } -
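The new Utils helpers implement classic lock striping: a recid hashes into a fixed pool of ReentrantLocks, so the lock count stays bounded (here 32) at the cost of occasional false sharing when two recids map to the same stripe. A minimal re-implementation (the mixing function is an assumption; MapDB's actual Utils.longHash may differ, and the mask below keeps the index non-negative, which a raw % cannot guarantee):

import java.util.concurrent.locks.ReentrantLock;

// Minimal re-implementation of the striping helpers added in this diff.
final class StripedLocks {
    static ReentrantLock[] newLocks(int size) {
        ReentrantLock[] locks = new ReentrantLock[size];
        for (int i = 0; i < locks.length; i++) locks[i] = new ReentrantLock();
        return locks;
    }

    // Assumed Wang/Jenkins-style mix; not necessarily MapDB's Utils.longHash.
    static int longHash(long key) {
        long h = key;
        h = (~h) + (h << 18);
        h ^= (h >>> 31);
        h *= 21;
        h ^= (h >>> 11);
        h += (h << 6);
        h ^= (h >>> 22);
        return (int) h & 0x7fffffff;   // non-negative, so % below is safe
    }

    static void lock(ReentrantLock[] locks, long recid) {
        locks[longHash(recid) % locks.length].lock();
    }

    static void unlock(ReentrantLock[] locks, long recid) {
        locks[longHash(recid) % locks.length].unlock();
    }
}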
applications/editors/josm/plugins/imagerycache/src/org/mapdb/Volume.java
r29363 r29484 24 24 import java.nio.ByteBuffer; 25 25 import java.nio.MappedByteBuffer; 26 import java.nio.channels.AsynchronousFileChannel;27 26 import java.nio.channels.FileChannel; 28 import java.nio.file.StandardOpenOption;29 27 import java.util.Arrays; 28 import java.util.Map; 29 import java.util.WeakHashMap; 30 30 import java.util.concurrent.ExecutionException; 31 31 import java.util.concurrent.Future; … … 34 34 35 35 /** 36 * MapDB abstraction over raw storage (file, disk partition, memory etc...) 36 * MapDB abstraction over raw storage (file, disk partition, memory etc...). 37 * <p/> 38 * Implementations needs to be thread safe (especially 39 'ensureAvailable') operation. 40 * However updates do not have to be atomic, it is clients responsibility 41 * to ensure two threads are not writing/reading into the same location. 37 42 * 38 43 * @author Jan Kotek … … 41 46 42 47 public static final int BUF_SIZE = 1<<30; 43 public static final int INITIAL_SIZE = 1024*32;44 48 45 49 abstract public void ensureAvailable(final long offset); … … 49 53 abstract public void putByte(final long offset, final byte value); 50 54 51 abstract public void putData(final long offset, final byte[] value, int size);55 abstract public void putData(final long offset, final byte[] src, int srcPos, int srcSize); 52 56 abstract public void putData(final long offset, final ByteBuffer buf); 53 57 … … 88 92 putByte(offset, (byte)(b & 0xff)); 89 93 } 94 95 /** 96 * Reads a long from the indicated position 97 */ 98 public final long getSixLong(long pos) { 99 return 100 ((long) (getByte(pos + 0) & 0xff) << 40) | 101 ((long) (getByte(pos + 1) & 0xff) << 32) | 102 ((long) (getByte(pos + 2) & 0xff) << 24) | 103 ((long) (getByte(pos + 3) & 0xff) << 16) | 104 ((long) (getByte(pos + 4) & 0xff) << 8) | 105 ((long) (getByte(pos + 5) & 0xff) << 0); 106 } 107 108 /** 109 * Writes a long to the indicated position 110 */ 111 public final void putSixLong(long pos, long value) { 112 if(value<0) throw new IllegalArgumentException(); 113 if(value >> (6*8)!=0) 114 throw new IllegalArgumentException("does not fit"); 115 //TODO read/write as integer+short, might be faster 116 putByte(pos + 0, (byte) (0xff & (value >> 40))); 117 putByte(pos + 1, (byte) (0xff & (value >> 32))); 118 putByte(pos + 2, (byte) (0xff & (value >> 24))); 119 putByte(pos + 3, (byte) (0xff & (value >> 16))); 120 putByte(pos + 4, (byte) (0xff & (value >> 8))); 121 putByte(pos + 5, (byte) (0xff & (value >> 0))); 122 123 } 124 90 125 91 126 /** returns underlying file if it exists */ … … 111 146 public static Factory fileFactory(final boolean readOnly, final boolean RAF, final File indexFile){ 112 147 return fileFactory(readOnly, RAF, indexFile, 113 new File(indexFile.getPath() + Stor ageDirect.DATA_FILE_EXT),114 new File(indexFile.getPath() + Stor ageJournaled.TRANS_LOG_FILE_EXT));148 new File(indexFile.getPath() + StoreDirect.DATA_FILE_EXT), 149 new File(indexFile.getPath() + StoreWAL.TRANS_LOG_FILE_EXT)); 115 150 } 116 151 … … 142 177 return new Factory() { 143 178 144 @Override public Volume createIndexVolume() {179 @Override public synchronized Volume createIndexVolume() { 145 180 return new MemoryVol(useDirectBuffer); 146 181 } 147 182 148 @Override public Volume createPhysVolume() {183 @Override public synchronized Volume createPhysVolume() { 149 184 return new MemoryVol(useDirectBuffer); 150 185 } 151 186 152 @Override public Volume createTransLogVolume() {187 @Override public synchronized Volume createTransLogVolume() { 153 188 return new 
MemoryVol(useDirectBuffer); 154 189 } … … 181 216 //check for most common case, this is already mapped 182 217 if(buffersPos<buffers.length && buffers[buffersPos]!=null && 183 buffers[buffersPos].capacity()>=offset% BUF_SIZE) 218 buffers[buffersPos].capacity()>=offset% BUF_SIZE){ 184 219 return; 220 } 185 221 186 222 growLock.lock(); … … 191 227 return; 192 228 229 ByteBuffer[] buffers2 = buffers; 193 230 194 231 //grow array if necessary 195 if(buffersPos>=buffers.length){ 196 buffers = Arrays.copyOf(buffers, Math.max(buffersPos, buffers.length * 2)); 197 } 232 if(buffersPos>=buffers2.length){ 233 buffers2 = Arrays.copyOf(buffers2, Math.max(buffersPos+1, buffers2.length * 2)); 234 } 235 198 236 199 237 //just remap file buffer 200 ByteBuffer newBuf = makeNewBuffer(offset); 238 if( buffers2[buffersPos] == null){ 239 //make sure previous buffer is fully expanded 240 if(buffersPos>0){ 241 ByteBuffer oldPrev = buffers2[buffersPos-1]; 242 if(oldPrev == null || oldPrev.capacity()!=BUF_SIZE){ 243 buffers2[buffersPos-1] = makeNewBuffer(1L*buffersPos*BUF_SIZE-1,buffers2); 244 } 245 } 246 } 247 248 249 ByteBuffer newBuf = makeNewBuffer(offset, buffers2); 201 250 if(readOnly) 202 251 newBuf = newBuf.asReadOnlyBuffer(); 203 252 204 buffers[buffersPos] = newBuf; 253 buffers2[buffersPos] = newBuf; 254 255 buffers = buffers2; 205 256 }finally{ 206 257 growLock.unlock(); … … 208 259 } 209 260 210 protected abstract ByteBuffer makeNewBuffer(long offset );261 protected abstract ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2); 211 262 212 263 protected final ByteBuffer internalByteBuffer(long offset) { … … 233 284 234 285 235 @Override public final void putData(final long offset, final byte[] value, final int size){286 @Override public void putData(final long offset, final byte[] src, int srcPos, int srcSize){ 236 287 final ByteBuffer b1 = internalByteBuffer(offset); 237 288 final int bufPos = (int) (offset% BUF_SIZE); … … 239 290 synchronized (b1){ 240 291 b1.position(bufPos); 241 b1.put( value, 0, size);292 b1.put(src, srcPos, srcSize); 242 293 } 243 294 } … … 264 315 try{ 265 316 return internalByteBuffer(offset).getInt((int) (offset% BUF_SIZE)); 317 } catch (NullPointerException e) { 318 throw new RuntimeException(""+offset,e); 319 266 320 }catch(IndexOutOfBoundsException e){ 267 321 throw new IOError(new EOFException()); … … 346 400 protected final FileChannel.MapMode mapMode; 347 401 protected final java.io.RandomAccessFile raf; 402 403 protected final Map<ByteBuffer, String> unreleasedBuffers = 404 Utils.isWindows() ? 
new WeakHashMap<ByteBuffer, String>() : null;
348 405 
349 406 	static final int BUF_SIZE_INC = 1024*1024;
… …
370 427 	}else{
371 428 	buffers = new ByteBuffer[1];
372 	buffers[0] = fileChannel.map(mapMode, 0, INITIAL_SIZE);
373 	if(mapMode == FileChannel.MapMode.READ_ONLY)
374 	buffers[0] = buffers[0].asReadOnlyBuffer();
429 	// buffers[0] = fileChannel.map(mapMode, 0, INITIAL_SIZE);
430 	// if(mapMode == FileChannel.MapMode.READ_ONLY)
431 	// buffers[0] = buffers[0].asReadOnlyBuffer();
375 432 
376 433 	}
… …
394 451 	}
395 452 	buffers = null;
453 	if(unreleasedBuffers!=null){
454 	for(ByteBuffer b:unreleasedBuffers.keySet().toArray(new MappedByteBuffer[0])){
455 	if(b!=null && (b instanceof MappedByteBuffer)){
456 	unmap((MappedByteBuffer) b);
457 	}
458 	}
459 	}
460 
396 461 	} catch (IOException e) {
397 462 	throw new IOError(e);
… …
428 493 
429 494 	@Override
430 	protected ByteBuffer makeNewBuffer(long offset) {
431 	try {
432 	//unmap old buffer on windows
433 	int bufPos = (int) (offset/BUF_SIZE);
434 	if(bufPos<buffers.length && buffers[bufPos]!=null){
435 	unmap((MappedByteBuffer) buffers[bufPos]);
436 	buffers[bufPos] = null;
437 	}
438 
495 	protected ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2) {
496 	try {
439 497 	long newBufSize = offset% BUF_SIZE;
440 498 	newBufSize = newBufSize + newBufSize%BUF_SIZE_INC; //round to BUF_SIZE_INC
441 	return fileChannel.map(
442 	mapMode,
443 	offset - offset% BUF_SIZE, newBufSize );
499 	ByteBuffer buf = fileChannel.map( mapMode, offset - offset% BUF_SIZE, newBufSize );
500 	if(unreleasedBuffers!=null) unreleasedBuffers.put(buf, "");
501 	return buf;
444 502 	} catch (IOException e) {
445 503 	if(e.getCause()!=null && e.getCause() instanceof OutOfMemoryError){
… …
463 521 	super(false);
464 522 	this.useDirectBuffer = useDirectBuffer;
465 	ByteBuffer b0 = useDirectBuffer?
466 	ByteBuffer.allocateDirect(INITIAL_SIZE) :
467 	ByteBuffer.allocate(INITIAL_SIZE);
468 	buffers = new ByteBuffer[]{b0};
469 	}
470 
471 	@Override protected ByteBuffer makeNewBuffer(long offset) {
523 	// ByteBuffer b0 = useDirectBuffer?
524 	// ByteBuffer.allocateDirect(INITIAL_SIZE) :
525 	// ByteBuffer.allocate(INITIAL_SIZE);
526 	// buffers = new ByteBuffer[]{b0};
527 	buffers=new ByteBuffer[1];
528 	}
529 
530 	@Override protected ByteBuffer makeNewBuffer(long offset, ByteBuffer[] buffers2) {
472 531 	final int newBufSize = Utils.nextPowTwo((int) (offset % BUF_SIZE));
473 532 	//double size of existing in-memory-buffer
… …
476 535 	ByteBuffer.allocate(newBufSize);
477 536 	final int buffersPos = (int) (offset/ BUF_SIZE);
478 	final ByteBuffer oldBuffer = buffers[buffersPos];
537 	final ByteBuffer oldBuffer = buffers2[buffersPos];
479 538 	if(oldBuffer!=null){
480 539 	//copy old buffer if it exists
… …
580 639 
581 640 	@Override
582 	synchronized public void putData( long offset, byte[] value, int size){
583 	try {
584 	if(pos!=offset){
585 	raf.seek(offset);
586 	}
587 	pos=offset+size;
588 	raf.write( value,0,size);
641 	synchronized public void putData(final long offset, final byte[] src, int srcPos, int srcSize){
642 	try {
643 	if(pos!=offset){
644 	raf.seek(offset);
645 	}
646 	pos=offset+srcSize;
647 	raf.write(src,srcPos,srcSize);
589 648 	} catch (IOException e) {
590 649 	throw new IOError(e);
… …
602 661 	byte[] b = new byte[size];
603 662 	buf.get(b);
604 	putData(offset, b, size);
663 	putData(offset, b, 0, size);
605 664 	} catch (IOException e) {
606 665 	throw new IOError(e);
… …
708 767 	}
709 768 
710 	public static class AsyncFileChannelVol extends Volume{
711 
712 
713 	protected AsynchronousFileChannel channel;
714 	protected final boolean readOnly;
715 	protected final File file;
716 
717 	public AsyncFileChannelVol(File file, boolean readOnly){
718 	this.readOnly = readOnly;
719 	this.file = file;
720 	try {
721 	this.channel = readOnly?
722 	AsynchronousFileChannel.open(file.toPath(),StandardOpenOption.READ):
723 	AsynchronousFileChannel.open(file.toPath(),StandardOpenOption.READ, StandardOpenOption.WRITE);
724 
725 	} catch (IOException e) {
726 	throw new IOError(e);
727 	}
728 	}
729 
730 	@Override
731 	public void ensureAvailable(long offset) {
732 	//we do not have a list of ByteBuffers, so ensure size does not have to do anything
733 	}
734 
735 
736 
737 	protected void await(Future<Integer> future, int size) {
738 	try {
739 	int res = future.get();
740 	if(res!=size) throw new InternalError("not enough bytes");
741 	} catch (InterruptedException e) {
742 	throw new RuntimeException(e);
743 	} catch (ExecutionException e) {
744 	throw new RuntimeException(e);
745 	}
746 	}
747 
748 	@Override
749 	public void putByte(long offset, byte value) {
750 	ByteBuffer b = ByteBuffer.allocate(1);
751 	b.put(0, value);
752 	await(channel.write(b, offset),1);
753 	}
754 	@Override
755 	public void putInt(long offset, int value) {
756 	ByteBuffer b = ByteBuffer.allocate(4);
757 	b.putInt(0, value);
758 	await(channel.write(b, offset),4);
759 	}
760 
761 	@Override
762 	public void putLong(long offset, long value) {
763 	ByteBuffer b = ByteBuffer.allocate(8);
764 	b.putLong(0, value);
765 	await(channel.write(b, offset),8);
766 	}
767 
768 	@Override
769 	public void putData(long offset, byte[] value, int size) {
770 	ByteBuffer b = ByteBuffer.wrap(value);
771 	b.limit(size);
772 	await(channel.write(b,offset),size);
773 	}
774 
775 	@Override
776 	public void putData(long offset, ByteBuffer buf) {
777 	await(channel.write(buf,offset), buf.limit() - buf.position());
778 	}
779 
780 
781 
782 	@Override
783 	public long getLong(long offset) {
784 	ByteBuffer b = ByteBuffer.allocate(8);
785 	await(channel.read(b, offset), 8);
786 	b.rewind();
787 	return b.getLong();
788 	}
789 
790 	@Override
791 	public byte getByte(long offset) {
792 	ByteBuffer b = ByteBuffer.allocate(1);
793 	await(channel.read(b, offset), 1);
794 	b.rewind();
795 	return b.get();
796 	}
797 
798 	@Override
799 	public int getInt(long offset) {
800 	ByteBuffer b = ByteBuffer.allocate(4);
801 	await(channel.read(b, offset), 4);
802 	b.rewind();
803 	return b.getInt();
804 	}
805 
806 
807 
808 	@Override
809 	public DataInput2 getDataInput(long offset, int size) {
810 	ByteBuffer b = ByteBuffer.allocate(size);
811 	await(channel.read(b, offset), size);
812 	b.rewind();
813 	return new DataInput2(b,0);
814 	}
815 
816 	@Override
817 	public void close() {
818 	try {
819 	channel.close();
820 	} catch (IOException e) {
821 	throw new IOError(e);
822 	}
823 	}
824 
825 	@Override
826 	public void sync() {
827 	try {
828 	channel.force(true);
829 	} catch (IOException e) {
830 	throw new IOError(e);
831 	}
832 	}
833 
834 	@Override
835 	public boolean isEmpty() {
836 	return file.length()>0;
837 	}
838 
839 	@Override
840 	public void deleteFile() {
841 	file.delete();
842 	}
843 
844 	@Override
845 	public boolean isSliced() {
846 	return false;
847 	}
848 
849 	@Override
850 	public File getFile() {
851 	return file;
852 	}
853 	}
854 769 
855 770 
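Editor's note: the Volume.java changes above replace eager per-buffer unmapping inside makeNewBuffer() with a WeakHashMap (unreleasedBuffers) that remembers every buffer returned by fileChannel.map(); close() then walks a snapshot of its keys and unmaps whatever is still live. A minimal sketch of that pattern follows, assuming nothing beyond java.nio; the class name TrackedMmapVolume and the stubbed unmap() are illustrative, not MapDB's actual API.

    import java.io.IOException;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.util.Map;
    import java.util.WeakHashMap;

    class TrackedMmapVolume {
        private final FileChannel fileChannel;
        // Weak keys: buffers the GC has already reclaimed drop out of the map on their own.
        private final Map<MappedByteBuffer, String> unreleasedBuffers =
                new WeakHashMap<MappedByteBuffer, String>();

        TrackedMmapVolume(FileChannel fileChannel) {
            this.fileChannel = fileChannel;
        }

        synchronized MappedByteBuffer makeNewBuffer(long offset, long size) throws IOException {
            MappedByteBuffer buf = fileChannel.map(FileChannel.MapMode.READ_WRITE, offset, size);
            unreleasedBuffers.put(buf, "");  // remember it so close() can release it explicitly
            return buf;
        }

        synchronized void close() throws IOException {
            // Unmap every still-live buffer before closing the channel; without this,
            // Windows keeps the backing file locked until the buffers are GCed.
            for (MappedByteBuffer b : unreleasedBuffers.keySet().toArray(new MappedByteBuffer[0])) {
                if (b != null) {
                    unmap(b);
                }
            }
            fileChannel.close();
        }

        private void unmap(MappedByteBuffer b) {
            // placeholder: MapDB performs a reflection hack over the buffer's cleaner here
        }
    }

Copying keySet() into an array before iterating matters: entries of a WeakHashMap can vanish mid-iteration, so the snapshot avoids surprises while the buffers are being released.

-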
applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/ImageryCachePlugin.java
r29363 r29484
17 17 	public OsmTileLoader makeTileLoader(TileLoaderListener listener) {
18 18 	String cachePath = TMSLayer.PROP_TILECACHE_DIR.get();
19 	try {
20 	new File(cachePath).mkdirs();
21 	} catch (Exception e) {
22 	cachePath=".";
23 	}
24 
19 25 	if (cachePath != null && !cachePath.isEmpty()) {
20 26 	return new OsmDBTilesLoader(listener, new File(cachePath));
… …
29 35 	}
30 36 
37 	public static void main(String[] args) {
38 	System.out.println("Debugging code for ImageryCache plugin");
39 	}
31 40 	}
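Editor's note: in the makeTileLoader() change above, the try/catch around mkdirs() mostly handles a null cachePath (new File(null) throws an NPE, landing in the "." fallback); File.mkdirs() itself signals failure through its boolean return value rather than by throwing. A more explicit variant, offered only as a sketch and not as the committed code:

    String cachePath = TMSLayer.PROP_TILECACHE_DIR.get();
    File cacheDir = (cachePath == null || cachePath.isEmpty()) ? null : new File(cachePath);
    // mkdirs() returns false when the directory could not be created
    if (cacheDir != null && !cacheDir.isDirectory() && !cacheDir.mkdirs()) {
        cacheDir = new File(".");  // fall back to the working directory
    }

-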
applications/editors/josm/plugins/imagerycache/src/org/openstreetmap/josm/plugins/imagerycache/OsmDBTilesLoader.java
r29368 r29484
6 6 	import java.io.IOException;
7 7 	import java.io.InputStream;
8 	import java.io.Serializable;
9 8 	import java.net.HttpURLConnection;
10 9 	import java.net.URL;
11 10 	import java.net.URLConnection;
12 	import java.util.HashMap;
13 11 	import java.util.Map;
14 12 	import java.util.Random;
15 	import org.mapdb.DB;
16 	import org.mapdb.DBMaker;
17 13 	import org.openstreetmap.gui.jmapviewer.JobDispatcher;
18 14 	import org.openstreetmap.gui.jmapviewer.OsmTileLoader;
… …
22 18 	import org.openstreetmap.gui.jmapviewer.interfaces.TileSource;
23 19 	import org.openstreetmap.gui.jmapviewer.interfaces.TileSource.TileUpdate;
20 	import org.openstreetmap.josm.Main;
24 21 	import org.openstreetmap.josm.data.preferences.BooleanProperty;
… …
35 32 
36 33 	public static final boolean debug = new BooleanProperty("imagerycache.debug", false).get();
37 
38 	static class TileDAOMapDB {
39 	protected HashMap<String, DB> dbs = new HashMap<String, DB>();
40 	protected HashMap<String, Map<Long,DBTile>> storages = new HashMap<String, Map<Long,DBTile>>();
41 	private final File cacheFolder;
42 
43 	/**
44 	* Lazy creation of DB object associated to * @param source
45 	* or returning from cache
46 	*/
47 	private synchronized DB getDB(String source) {
48 	DB db = dbs.get(source);
49 	if (db==null) {
50 	try {
51 	db = DBMaker
52 	.newFileDB(new File(cacheFolder, "tiles_"+source.replaceAll("[\\\\/:*?\"<>| ]", "_")))
53 	.randomAccessFileEnableIfNeeded()
54 	.journalDisable()
55 	.closeOnJvmShutdown()
56 	.make();
57 	dbs.put(source, db);
58 	} catch (Exception e) {
59 	System.out.println("Error: Can not create MapDB file");
60 	e.printStackTrace(System.out);
61 	}
62 	}
63 	return db;
64 	}
65 
66 	private synchronized Map<Long,DBTile> getStorage(String source) {
67 	Map<Long, DBTile> m = storages.get(source);
68 	if (m == null) {
69 	try {
70 	DB d = getDB(source);
71 	m = d.getHashMap("tiles");
72 	storages.put(source, m);
73 	if (debug) System.out.println("Created storage "+source);
74 	} catch (Exception e) {
75 	System.out.println("Error: Can not create HashMap in MapDB storage");
76 	e.printStackTrace(System.out);
77 	}
78 	}
79 	return m;
80 	}
81 
82 	public TileDAOMapDB(File cacheFolder) {
83 	this.cacheFolder = cacheFolder;
84 	}
85 
86 
87 	DBTile getById(String source, long id) {
88 	return getStorage(source).get(id);
89 	}
90 
91 	protected void updateModTime(String source, long id, DBTile dbTile) {
92 	if (debug) System.out.println("Tile "+id+": Updating modification time");
93 	getStorage(source).put(id, dbTile);
94 	}
95 
96 	protected void updateTile(String source, long id, DBTile dbTile) {
97 	if (debug) System.out.println("Tile "+id+": Updating tile in base");
98 	getStorage(source).put(id, dbTile);
99 	}
100 
101 	protected void deleteTile(String source, long id) {
102 	getStorage(source).remove(id);
103 	}
104 
105 
106 	}
107 34 
108 35 	TileDAOMapDB dao;
109 
110 36 
111 37 	protected long maxCacheFileAge = FILE_AGE_ONE_WEEK;
… …
115 41 	public OsmDBTilesLoader(TileLoaderListener smap, File cacheFolder) {
116 42 	super(smap);
117 	dao = new TileDAOMapDB(cacheFolder);
43 	dao = TileDAOMapDB.getInstance();
44 	dao.setCacheFolder(cacheFolder);
118 45 	}
… …
122 49 	return new DatabaseLoadJob(tile);
123 50 	}
124 
125 	static class DBTile implements Serializable {
126 	byte data[];
127 	Map<String, String> metaData;
128 	long lastModified;
129 	}
130 51 
131 52 	protected class DatabaseLoadJob implements TileJob {
… …
133 54 	private final Tile tile;
134 55 	File tileCacheDir;
56 
57 	/**
58 	* Stores the tile loaded from the database, null if nothing was found.
59 	*/
135 60 	DBTile dbTile = null;
136 61 	long fileAge = 0;
137 	boolean fileTilePainted = false;
138 62 
139 63 	long id;
… …
161 85 	return;
162 86 	}
163 	if (fileTilePainted) {
87 	if (dbTile != null) {
164 88 	TileJob job = new TileJob() {
165 	public void run() {
166 	loadOrUpdateTile();
167 	}
168 	public Tile getTile() {
89 	@Override public void run() {
90 	loadOrUpdateTileFromServer();
91 	}
92 	@Override public Tile getTile() {
169 93 	return tile;
170 94 	}
… …
172 96 	JobDispatcher.getInstance().addJob(job);
173 97 	} else {
174 	loadOrUpdateTile();
175 	}
176 	}
177 
98 	loadOrUpdateTileFromServer();
99 	}
100 	}
101 
102 	/**
103 	* Loads tile from database.
104 	* There can be dbTile != null but the tile is outdated and a reload is still needed.
105 	* @return true if no loading from server is needed.
106 	*/
178 107 	private boolean loadTileFromFile() {
179 108 	ByteArrayInputStream bin = null;
… …
182 111 
183 112 	if (dbTile == null) return false;
113 
114 	loadMetadata();
115 	if (debug) System.out.println(id+": found in cache, metadata ="+dbTile.metaData);
184 116 
185 117 	if ("no-tile".equals(tile.getValue("tile-info")))
186 118 	{
187 119 	tile.setError("No tile at this zoom level");
188 	if (dbTile!=null) {
189 	dao.deleteTile(sourceName, id);
190 	}
120 	dao.deleteTile(sourceName, id);
191 121 	} else {
192 122 	bin = new ByteArrayInputStream(dbTile.data);
… …
202 132 	tile.setLoaded(true);
203 133 	listener.tileLoadingFinished(tile, true);
204 	fileTilePainted = true;
205 	return true;
206 	}
207 	listener.tileLoadingFinished(tile, true);
208 	fileTilePainted = true;
134 	return true; // tile loaded
135 	} else {
136 	listener.tileLoadingFinished(tile, true);
137 	return false; // Tile is loaded, but too old. Should be reloaded from server
138 	}
209 139 	} catch (Exception e) {
140 	System.out.println("Error: Can not load tile from database: "+sourceName+":"+id);
141 	e.printStackTrace(System.out);
210 142 	try {
211 143 	if (bin != null) {
… …
213 145 	dao.deleteTile(sourceName, id);
214 146 	}
215 	} catch (Exception e1) {
216 	}
147 	} catch (Exception e1) { }
217 148 	dbTile = null;
218 149 	fileAge = 0;
219 	}
220 	return false;
150 	return false; // tile is not loaded because of some error (corrupted database, etc.)
151 	} catch (Error e) { // this is bad, but MapDB throws it
152 	System.out.println("Serious database error: Can not load tile from database: "+sourceName+":"+id);
153 	e.printStackTrace(System.out);
154 	dbTile = null; fileAge = 0; return false;
155 	}
221 156 
222 157 
… …
225 160 	}
226 161 
227 	private void loadOrUpdateTile() {
162 	private void loadOrUpdateTileFromServer() {
228 163 
229 164 	try {
… …
231 166 	final TileUpdate tileUpdate = tile.getSource().getTileUpdate();
232 167 	if (dbTile != null) {
168 	// MapDB wants immutable entities
169 	dbTile = new DBTile(dbTile);
233 170 	switch (tileUpdate) {
234 171 	case IfModifiedSince: // (1)
… …
276 213 	loadTileMetadata(tile, urlConn);
277 214 	dbTile.metaData = tile.getMetadata();
278 215 
279 216 	if ("no-tile".equals(tile.getValue("tile-info")))
280 217 	{
… …
305 242 	listener.tileLoadingFinished(tile, false);
306 243 	try {
307 	System.out.println("Tile "+id+": Error: Failed loading from "+tile.getUrl());
244 	System.out.println("Error: Tile "+id+" can not be loaded from "+tile.getUrl());
308 245 	e.printStackTrace(System.out);
309 246 	} catch(IOException i) {
… …
381 318 	}
382 319 
320 	/**
321 	* Loads the attribute map from dbTile to tile.
322 	*/
323 	private void loadMetadata() {
324 	Map<String,String> m = dbTile.metaData;
325 	if (m==null) return;
326 	for (String k: m.keySet()) {
327 	tile.putValue(k, m.get(k));
328 	}
329 	}
383 330 	}
384 
385 331 	}
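Editor's note: the "dbTile = new DBTile(dbTile)" line above is the heart of this changeset's MapDB fix: MapDB may hand back the very instance it holds internally, so a cached value has to be copied before it is mutated and written back. A minimal sketch of the pattern, assuming a MapDB-backed Map<Long, DBTile> named storage (as the removed TileDAOMapDB exposed via getHashMap("tiles")) and the DBTile copy constructor introduced by this changeset:

    DBTile cached = storage.get(id);        // instance possibly shared with MapDB internals
    if (cached != null) {
        DBTile fresh = new DBTile(cached);  // copy first...
        fresh.lastModified = System.currentTimeMillis();
        storage.put(id, fresh);             // ...then store the copy; never mutate 'cached'
    }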