-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
- Loading branch information
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -26,6 +26,7 @@ | |
import java.util.ArrayList; | ||
import java.util.Arrays; | ||
import java.util.Collection; | ||
import java.util.Collections; | ||
import java.util.Enumeration; | ||
import java.util.HashMap; | ||
import java.util.HashSet; | ||
|
@@ -1867,7 +1868,8 @@ public <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> asyncLopPip | |
insertList.add(new ListPipedInsert<>(key, index, elementList, attributesForCreate, tc)); | ||
} | ||
} | ||
return asyncCollectionPipedInsert(key, insertList); | ||
|
||
return syncCollectionPipedInsert(key, Collections.unmodifiableList(insertList)); | ||
} | ||
|
||
@Override | ||
|
@@ -3170,6 +3172,79 @@ public void gotStatus(Integer index, OperationStatus status) { | |
return rv; | ||
} | ||
|
||
/** | ||
* Pipe insert method for collection items. | ||
* | ||
* @param key arcus cache key | ||
* @param insertList must not be empty. | ||
* @return future holding the map of element index and the reason why insert operation failed | ||
*/ | ||
private <T> CollectionFuture<Map<Integer, CollectionOperationStatus>> syncCollectionPipedInsert( | ||
final String key, final List<CollectionPipedInsert<T>> insertList) { | ||
final CountDownLatch latch = new CountDownLatch(1); | ||
final PipedCollectionFuture<Integer, CollectionOperationStatus> rv = | ||
new PipedCollectionFuture<>(latch, operationTimeout); | ||
|
||
for (int i = 0; i < insertList.size(); i++) { | ||
final CollectionPipedInsert<T> insert = insertList.get(i); | ||
final int idx = i; | ||
Operation op = opFact.collectionPipedInsert(key, insert, | ||
new CollectionPipedInsertOperation.Callback() { | ||
// each result status | ||
public void receivedStatus(OperationStatus status) { | ||
CollectionOperationStatus cstatus; | ||
|
||
if (status instanceof CollectionOperationStatus) { | ||
cstatus = (CollectionOperationStatus) status; | ||
} else { | ||
getLogger().warn("Unhandled state: " + status); | ||
cstatus = new CollectionOperationStatus(status); | ||
} | ||
rv.setOperationStatus(cstatus); | ||
} | ||
|
||
// complete | ||
public void complete() { | ||
if (idx == insertList.size() - 1) { | ||
// countdown if this is last op | ||
latch.countDown(); | ||
} else { | ||
// if error or cancel occurred by this operation, | ||
// do not add all remaining operations and mark as cancelled | ||
if (rv.hasErrored() || !rv.getOperationStatus().isSuccess()) { | ||
This comment has been minimized.
Sorry, something went wrong.
This comment has been minimized.
Sorry, something went wrong.
This comment has been minimized.
Sorry, something went wrong.
This comment has been minimized.
Sorry, something went wrong.
uhm0311
|
||
for (int chunkIdx = idx + 1; chunkIdx < insertList.size(); chunkIdx++) { | ||
for (int itemIdx = 0; itemIdx < insertList.get(chunkIdx).getItemCount(); itemIdx++) { | ||
rv.addEachResult(itemIdx + (chunkIdx * CollectionPipedInsert.MAX_PIPED_ITEM_COUNT), | ||
new CollectionOperationStatus(new CollectionOperationStatus( | ||
false, "CANCELED", CollectionResponse.CANCELED))); | ||
} | ||
} | ||
latch.countDown(); | ||
} else { | ||
// add next operation if this is not last op | ||
Operation nextOp = rv.getOp(idx + 1); | ||
addOp(key, nextOp); | ||
} | ||
} | ||
} | ||
|
||
// got status | ||
public void gotStatus(Integer index, OperationStatus status) { | ||
if (status instanceof CollectionOperationStatus) { | ||
rv.addEachResult(index + (idx * CollectionPipedInsert.MAX_PIPED_ITEM_COUNT), | ||
(CollectionOperationStatus) status); | ||
} else { | ||
rv.addEachResult(index + (idx * CollectionPipedInsert.MAX_PIPED_ITEM_COUNT), | ||
new CollectionOperationStatus(status)); | ||
} | ||
} | ||
}); | ||
rv.addOperation(op); | ||
} | ||
addOp(key, rv.getOp(0)); | ||
return rv; | ||
} | ||
|
||
@Override | ||
public Future<Map<String, CollectionOperationStatus>> asyncBopInsertBulk( | ||
List<String> keyList, long bkey, byte[] eFlag, Object value, | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,10 +1,9 @@ | ||
package net.spy.memcached.internal; | ||
|
||
import java.util.Collection; | ||
import java.util.HashSet; | ||
import java.util.ArrayList; | ||
import java.util.List; | ||
import java.util.Map; | ||
import java.util.concurrent.ConcurrentHashMap; | ||
import java.util.concurrent.ConcurrentLinkedQueue; | ||
import java.util.concurrent.CountDownLatch; | ||
import java.util.concurrent.ExecutionException; | ||
import java.util.concurrent.TimeUnit; | ||
|
@@ -18,7 +17,7 @@ | |
|
||
public class PipedCollectionFuture<K, V> | ||
extends CollectionFuture<Map<K, V>> { | ||
private final ConcurrentLinkedQueue<Operation> ops = new ConcurrentLinkedQueue<>(); | ||
private final List<Operation> ops = new ArrayList<>(); | ||
private final AtomicReference<CollectionOperationStatus> operationStatus | ||
= new AtomicReference<>(null); | ||
|
||
|
@@ -31,11 +30,12 @@ public PipedCollectionFuture(CountDownLatch l, long opTimeout) { | |
|
||
@Override | ||
public boolean cancel(boolean ign) { | ||
boolean rv = false; | ||
for (Operation op : ops) { | ||
rv |= op.cancel("by application."); | ||
if (!op.getState().equals(OperationState.COMPLETE)) { | ||
This comment has been minimized.
Sorry, something went wrong.
jhpark816
|
||
return op.cancel("by application."); | ||
} | ||
} | ||
return rv; | ||
return false; | ||
} | ||
|
||
@Override | ||
|
@@ -48,50 +48,49 @@ public boolean isCancelled() { | |
return false; | ||
} | ||
|
||
@Override | ||
public boolean isDone() { | ||
public boolean hasErrored() { | ||
for (Operation op : ops) { | ||
if (!(op.getState() == OperationState.COMPLETE || op.isCancelled())) { | ||
return false; | ||
if (op.hasErrored()) { | ||
return true; | ||
} | ||
} | ||
return true; | ||
return false; | ||
} | ||
|
||
@Override | ||
public boolean isDone() { | ||
return latch.getCount() == 0; | ||
} | ||
|
||
@Override | ||
public Map<K, V> get(long duration, TimeUnit unit) | ||
throws InterruptedException, TimeoutException, ExecutionException { | ||
|
||
System.out.println("ops size:" + ops.size()); | ||
long beforeAwait = System.currentTimeMillis(); | ||
Operation lastOp = ops.get(ops.size() - 1); | ||
if (!latch.await(duration, unit)) { | ||
Collection<Operation> timedOutOps = new HashSet<>(); | ||
for (Operation op : ops) { | ||
if (op.getState() != OperationState.COMPLETE) { | ||
timedOutOps.add(op); | ||
} else { | ||
MemcachedConnection.opSucceeded(op); | ||
} | ||
} | ||
if (!timedOutOps.isEmpty()) { | ||
// set timeout only once for piped ops requested to single node. | ||
MemcachedConnection.opTimedOut(timedOutOps.iterator().next()); | ||
if (lastOp.getState() != OperationState.COMPLETE) { | ||
MemcachedConnection.opTimedOut(lastOp); | ||
|
||
long elapsed = System.currentTimeMillis() - beforeAwait; | ||
throw new CheckedOperationTimeoutException(duration, unit, elapsed, timedOutOps); | ||
throw new CheckedOperationTimeoutException(duration, unit, elapsed, lastOp); | ||
} else { | ||
for (Operation op : ops) { | ||
MemcachedConnection.opSucceeded(op); | ||
} | ||
} | ||
} else { | ||
// continuous timeout counter will be reset only once in pipe | ||
MemcachedConnection.opSucceeded(ops.iterator().next()); | ||
MemcachedConnection.opSucceeded(lastOp); | ||
} | ||
|
||
for (Operation op : ops) { | ||
if (op != null && op.hasErrored()) { | ||
throw new ExecutionException(op.getException()); | ||
} | ||
if (lastOp != null && lastOp.hasErrored()) { | ||
throw new ExecutionException(lastOp.getException()); | ||
} | ||
|
||
if (op != null && op.isCancelled()) { | ||
throw new ExecutionException(new RuntimeException(op.getCancelCause())); | ||
} | ||
if (lastOp != null && lastOp.isCancelled()) { | ||
throw new ExecutionException(new RuntimeException(lastOp.getCancelCause())); | ||
} | ||
|
||
return failedResult; | ||
|
@@ -120,4 +119,8 @@ public void addEachResult(K index, V status) { | |
public void addOperation(Operation op) { | ||
ops.add(op); | ||
} | ||
|
||
public Operation getOp(int index) { | ||
return this.ops.get(index); | ||
} | ||
} |
rv.hasErrored() 경우와 rv.isCancelled() 경우는 latch.countDown()만 수행하면 됩니다. 이 경우, rv.failedResult는 조회되지 않기 때문입니다.
`!rv.getOperationStatus().isSuccess()` 경우에 CANCELLED로 실패를 등록하는 것은 어색합니다. 그리고, 기존에서 1~500개 연산을 pipelining 명령으로 요청할 경우, 100개 연산까지 처리하다가 101번째 연산에서 치명적인 오류가 발생하면, 101번째 연산의 오류는 rv.failedResult에 등록하지만, 102~500 범위의 연산들은 rv.failedResult에 등록하지 않았습니다. 이 부분을 검토하기 바랍니다.